tuning.py

from dataclasses import *
from functools import cached_property

import lightgbm as lgb
import optuna
from optuna import Trial
from optuna.multi_objective import trial
from optuna.multi_objective.study import MultiObjectiveStudy
from optuna.multi_objective.trial import FrozenMultiObjectiveTrial
from sklearn.base import TransformerMixin
from sklearn.pipeline import Pipeline

from yspecies.models import Metrics, ResultsCV, BasicCrossValidator
from yspecies.partition import ExpressionPartitions
from yspecies.utils import *


@dataclass(frozen=True)
class SpecializedTuningResults:
    '''
    Originally used with LightGBMTuner but then decided to get rid of it until bugs are fixed
    '''
    best_params: dict
    best_score: float

    def print_info(self):
        print("Best score:", self.best_score)
        best_params = self.best_params
        print("Best params:", best_params)
        print("  Params: ")
        for key, value in best_params.items():
            print("    {}: {}".format(key, value))


@dataclass
class LightTuner(TransformerMixin):
    '''
    It is somewhat buggy, see https://github.com/optuna/optuna/issues/1602#issuecomment-670937574
    I had to switch to GeneralTuner while they are fixing it
    '''
    time_budget_seconds: int
    parameters: Dict = field(default_factory=lambda: {
        'boosting_type': 'dart',
        'objective': 'regression',
        'metric': 'huber'
    })
    num_boost_round: int = 500
    early_stopping_rounds: int = 5
    seed: int = 42

    def fit(self, partitions: ExpressionPartitions, y=None) -> SpecializedTuningResults:
        cat = partitions.categorical_index if partitions.features.has_categorical else "auto"
        lgb_train = lgb.Dataset(partitions.X, partitions.Y, categorical_feature=cat, free_raw_data=False)
        tuner = optuna.integration.lightgbm.LightGBMTunerCV(
            self.parameters, lgb_train, verbose_eval=self.num_boost_round, folds=partitions.folds,
            time_budget=self.time_budget_seconds,
            num_boost_round=self.num_boost_round,
            early_stopping_rounds=self.early_stopping_rounds
        )
        # stepwise tuning of individual parameter groups, then the full run
        tuner.tune_bagging()
        tuner.tune_feature_fraction()
        tuner.tune_min_data_in_leaf()
        tuner.tune_feature_fraction_stage2()
        tuner.run()
        return SpecializedTuningResults(tuner.best_params, tuner.best_score)
@dataclass(frozen=True)
class TuningResults:
    best_params: dict
    train_metrics: Metrics = None
    validation_metrics: Metrics = None


@dataclass(frozen=True)
class MultiObjectiveResults:
    best_trials: List[trial.FrozenMultiObjectiveTrial]
    all_trials: List[trial.FrozenMultiObjectiveTrial]

    @staticmethod
    def from_study(study: MultiObjectiveStudy):
        return MultiObjectiveResults(study.get_pareto_front_trials(), study.trials)

    @cached_property
    def best_params(self) -> List[Dict]:
        return [t.params for t in self.best_trials]

    def vals(self, i: int, in_all: bool = False):
        trials = self.all_trials if in_all else self.best_trials
        return [t.values[i] for t in trials if t is not None and t.values[i] is not None]

    def best_trial_by(self, i: int = 0, maximize: bool = True, in_all: bool = False) -> FrozenMultiObjectiveTrial:
        num = np.argmax(self.vals(i, in_all)) if maximize else np.argmin(self.vals(i, in_all))
        return self.best_trials[num]

    def best_metrics_params_by(self, i: int = 0, maximize: bool = True, in_all: bool = False) -> Tuple:
        trial = self.best_trial_by(i, maximize, in_all)
        params = trial.params.copy()
        params["objective"] = "regression"
        params['metrics'] = ["l1", "l2", "huber"]
        return (trial.values, params)

    # objective order matches the study directions in Tune below:
    # 0 = R2 (maximize), 1 = huber (minimize), 2 = Kendall tau (maximize)
    def best_trial_r2(self, in_all: bool = False) -> FrozenMultiObjectiveTrial:
        return self.best_trial_by(0, True, in_all=in_all)

    def best_metrics_params_r2(self, in_all: bool = False):
        return self.best_metrics_params_by(0, True, in_all=in_all)

    def best_trial_huber(self, in_all: bool = False) -> FrozenMultiObjectiveTrial:
        return self.best_trial_by(1, False, in_all=in_all)

    def best_metrics_params_huber(self, in_all: bool = False):
        return self.best_metrics_params_by(1, False, in_all=in_all)

    def best_trial_kendall_tau(self, in_all: bool = False) -> FrozenMultiObjectiveTrial:
        return self.best_trial_by(2, True, in_all=in_all)

    def best_metrics_params_kendall_tau(self, in_all: bool = False):
        return self.best_metrics_params_by(2, True, in_all=in_all)

    @cached_property
    def results(self) -> List:
        return [t.values for t in self.all_trials]
@dataclass(frozen=False)
class Tune(TransformerMixin):
    transformer: Union[Union[TransformerMixin, Pipeline], BasicCrossValidator]
    n_trials: int
    parameters_space: Callable[[Trial], Dict] = None
    # three objectives in this order: R2 (maximize), huber (minimize), Kendall tau (maximize)
    study: MultiObjectiveStudy = field(default_factory=lambda: optuna.multi_objective.study.create_study(directions=['maximize', 'minimize', 'maximize']))
    multi_objective_results: MultiObjectiveResults = field(default_factory=lambda: None)
    threads: int = 1
    # assumed defaults: the original code referenced self.metrics and self.take_last without declaring them
    metrics: str = "huber"
    take_last: bool = True

    def objective_parameters(self, trial: Trial) -> dict:
        return {
            'objective': 'regression',
            'metric': {'mae', 'mse', 'huber'},
            'verbosity': -1,
            'boosting_type': trial.suggest_categorical('boosting_type', ['dart', 'gbdt']),
            'lambda_l1': trial.suggest_uniform('lambda_l1', 0.01, 4.0),
            'lambda_l2': trial.suggest_uniform('lambda_l2', 0.01, 4.0),
            'max_leaves': trial.suggest_int("max_leaves", 15, 25),
            'max_depth': trial.suggest_int('max_depth', 3, 8),
            'feature_fraction': trial.suggest_uniform('feature_fraction', 0.3, 1.0),
            'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.3, 1.0),
            'learning_rate': trial.suggest_uniform('learning_rate', 0.01, 0.1),
            'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 3, 8),
            'drop_rate': trial.suggest_uniform('drop_rate', 0.1, 0.3),
            "verbose": -1
        }

    def fit(self, X, y=None):
        data = X

        def objective(trial: Trial):
            params = self.objective_parameters(trial) if self.parameters_space is None else self.parameters_space(trial)
            result = self.transformer.fit_transform((data, params))
            # either extract the tracked metric from cross-validation results or pass the raw objective values through
            if isinstance(result, ResultsCV):
                return result.last(self.metrics) if self.take_last else result.min(self.metrics)
            else:
                return result

        self.study.optimize(objective, show_progress_bar=False, n_trials=self.n_trials, n_jobs=self.threads, gc_after_trial=True)
        self.multi_objective_results = MultiObjectiveResults(self.study.get_pareto_front_trials(), self.study.get_trials())
        return self

    def transform(self, data: Any) -> MultiObjectiveResults:
        return self.multi_objective_results
"""
@dataclass(frozen=True)
class GeneralTuner(TransformerMixin):

    num_boost_round: int = 500
    seed: int = 42
    #time_budget_seconds: int = 600
    to_optimize: str = "huber"
    direction: str = "minimize"
    n_trials: int = 10
    n_jobs: int = -1
    num_boost_round_train: int = 1000
    repeats: int = 10
    study: Study = field(default_factory=lambda: optuna.create_study(direction='minimize'))
    parameters: Callable[[Trial], float] = None
    best_model: lgb.Booster = None
    best_params: dict = None

    def default_parameters(self, trial: Trial) -> Dict:
        return {
            'objective': 'regression',
            'metric': {'mae', 'mse', 'huber'},
            'verbosity': -1,
            'boosting_type': trial.suggest_categorical('boosting_type', ['dart', 'gbdt']),
            'lambda_l1': trial.suggest_uniform('lambda_l1', 0.01, 4.0),
            'lambda_l2': trial.suggest_uniform('lambda_l2', 0.01, 4.0),
            'max_leaves': trial.suggest_int("max_leaves", 15, 40),
            'max_depth': trial.suggest_int('max_depth', 3, 8),
            'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0),
            'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0),
            'learning_rate': trial.suggest_uniform('learning_rate', 0.04, 0.2),
            'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 4, 10),
            "verbose": -1
        }

    def cv(self, partitions: ExpressionPartitions, trial: Trial) -> Dict:
        params = self.default_parameters(trial) if self.parameters is None else self.parameters(trial)
        cross = CrossValidator(self.num_boost_round, self.seed, parameters=params)
        return cross.fit(partitions)

    def fit(self, partitions: ExpressionPartitions, y=None) -> dict:
        def objective(trial: Trial):
            values: np.ndarray = np.zeros(self.repeats)
            #for i in range(0, self.repeats):
            eval_hist = self.cv(partitions, trial)
            # values[i] = np.array(eval_hist[f"{self.to_optimize}-mean"]).min()
            return np.average(values)

        self.study.optimize(objective, show_progress_bar=False, n_trials=self.n_trials, n_jobs=self.n_jobs, gc_after_trial=True)
        self.best_params = self.study.best_params
        print(f"best_params: {self.best_params}")
        return self.best_params

    def transform(self, partitions: ExpressionPartitions) -> TuningResults:
        assert self.best_params is not None, "best params are not known - the model must be first fit!"
        if partitions.n_hold_out > 0:
            factory = ModelFactory(parameters=self.best_params)
            self.best_model = factory.regression_model(partitions.cv_merged_x, partitions.hold_out_x,
                                                       partitions.cv_merged_y, partitions.hold_out_y,
                                                       partitions.categorical_index, num_boost_round=self.num_boost_round_train)
            train_prediction = self.best_model.predict(partitions.cv_merged_x, num_iteration=self.best_model.best_iteration)
            test_prediction = self.best_model.predict(partitions.hold_out_x, num_iteration=self.best_model.best_iteration)
            train_metrics = Metrics.calculate(train_prediction, partitions.cv_merged_y)
            test_metrics = Metrics.calculate(test_prediction, partitions.hold_out_y)
        else:
            train_metrics = None
            test_metrics = None
        return TuningResults(self.study.best_params, train_metrics, test_metrics)
"""
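

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): a minimal, hypothetical
# example of wiring the Tune transformer above. It assumes a custom step,
# CVEvaluator, whose fit_transform accepts a (data, params) tuple and returns
# the three objective values the default multi-objective study expects
# (R2 to maximize, huber loss to minimize, Kendall tau to maximize).
# CVEvaluator, the synthetic dataset and all numbers below are illustrative
# assumptions, not part of yspecies.
if __name__ == "__main__":
    import numpy as np
    from scipy.stats import kendalltau
    from sklearn.datasets import make_regression
    from sklearn.metrics import r2_score
    from sklearn.model_selection import train_test_split

    class CVEvaluator(TransformerMixin):
        """Hypothetical helper: trains LightGBM on a train split and scores it on a held-out split."""

        def fit(self, X, y=None):
            return self

        # Tune only calls fit_transform, so transform is not needed here
        def fit_transform(self, data_and_params, y=None):
            (features, target), params = data_and_params
            x_train, x_test, y_train, y_test = train_test_split(features, target, random_state=42)
            booster = lgb.train(params, lgb.Dataset(x_train, y_train), num_boost_round=100)
            prediction = booster.predict(x_test)
            error = y_test - prediction
            # huber loss with delta = 1.0, computed manually for the sketch
            huber = np.mean(np.where(np.abs(error) < 1.0, 0.5 * error ** 2, np.abs(error) - 0.5))
            return r2_score(y_test, prediction), huber, kendalltau(y_test, prediction).correlation

    features, target = make_regression(n_samples=200, n_features=20, noise=0.1, random_state=42)
    tune = Tune(transformer=CVEvaluator(), n_trials=10).fit((features, target))
    results = tune.transform(None)
    print(results.best_metrics_params_r2())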