extract_top_norms.py

import sys
sys.path.append('src')

import yaml
from pickle_wrapper import unpickle, pickle_it
from mcmc_norm_learning.environment import position
from mcmc_norm_learning.robot_task_new import task
from mcmc_norm_learning.algorithm_1_v4 import to_tuple
from mcmc_norm_learning.mcmc_performance import performance
from collections import Counter
import operator
import pickle
import pickle_wrapper
import pandas as pd
import ast

# Load experiment parameters.
with open("params.yaml", 'r') as fd:
    params = yaml.safe_load(fd)

colour_specific = params['colour_specific']
shape_specific = params['shape_specific']

# Parse the target area as two corner positions from a string like "x0,y0;x1,y1".
target_area_parts = params['target_area'].replace(' ', '').split(';')
target_area_part0 = position(*map(float, target_area_parts[0].split(',')))
target_area_part1 = position(*map(float, target_area_parts[1].split(',')))
target_area = (target_area_part0, target_area_part1)

the_task = task(colour_specific, shape_specific, target_area)
true_expression = params['true_norm']['exp']

# Count how often each expression occurs in the posterior sample.
posterior_sample = unpickle('data/posterior.pickle')
learned_expressions = Counter(map(to_tuple, posterior_sample))

# Keep the n most frequent norms and persist them.
n = 5
top_norms_with_freq = learned_expressions.most_common(n)
top_norms = list(map(operator.itemgetter(0), top_norms_with_freq))
pickle_it(top_norms, 'data/top_norms.pickle')

env = unpickle('data/env.pickle')

# Rank expressions by log-posterior recorded in the chain output.
exp_posterior_df = pd.read_csv('metrics/chain_posteriors.csv',
                               usecols=['expression', 'log_posterior'])
exp_posterior_df = exp_posterior_df.drop_duplicates()
exp_posterior_df['post_rank'] = exp_posterior_df['log_posterior'].rank(method='dense', ascending=False)
exp_posterior_df.sort_values('post_rank', inplace=True)
exp_posterior_df['expression'] = exp_posterior_df['expression'].transform(ast.literal_eval)
exp_posterior_df['expression'] = exp_posterior_df['expression'].transform(to_tuple)

def log_posterior(exp, exp_lp_df):
    """Look up the log-posterior recorded for a given expression."""
    return exp_lp_df.loc[exp_lp_df['expression'] == exp]['log_posterior'].iloc[0]

# Write a summary of the top norms with their frequencies and log-posteriors.
with open('metrics/precision_recall.txt', 'w') as f:
    f.write(f"Number of unique Norms in sequence={len(learned_expressions)}\n")
    f.write(f"Top {n} norms:\n")
    for expression, freq in top_norms_with_freq:
        f.write(f"Freq. {freq}, lp {log_posterior(expression, exp_posterior_df)}: ")
        f.write(f"{expression}\n")
    f.write("\n")

# Calculate precision and recall of top_n norms from learned expressions.
pr_result = performance(the_task, env, true_expression, learned_expressions,
                        folder_name="temp", file_name="top_norm",
                        top_n=n, beta=1, repeat=100000, verbose=False)
with open('metrics/precision_recall.txt', 'a') as f:
    f.write(pr_result.to_string())
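
For quick smoke-testing, below is a minimal sketch of how one might generate a placeholder params.yaml containing only the keys this script reads (colour_specific, shape_specific, target_area, true_norm.exp). Every value is a made-up placeholder, and the target_area string simply mirrors how the script parses it (two comma-separated corners split on ';'); it is not taken from the project's real configuration.

import yaml

# Hypothetical placeholder configuration -- illustrative values only,
# not the project's actual experiment settings.
placeholder_params = {
    'colour_specific': 'red',           # placeholder colour value
    'shape_specific': 'cube',           # placeholder shape value
    'target_area': '0.1,0.1;0.6,0.6',   # "x0,y0;x1,y1" corners, as parsed by the script above
    'true_norm': {'exp': '...'},        # placeholder norm expression
}

# Write the sketch to a separate file so any real params.yaml is left untouched.
with open('params_example.yaml', 'w') as fd:
    yaml.safe_dump(placeholder_params, fd)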