partition.py

from dataclasses import *
from functools import cached_property
from sklearn.base import TransformerMixin
from yspecies.preprocess import EncodedFeatures, FeatureSelection
from yspecies.utils import *  # the wildcard import is expected to provide pd, np, List, Tuple and show used below
import itertools
from pathlib import Path
from loguru import logger
@dataclass(frozen=True)
class PartitionParameters:
    n_folds: int
    n_hold_out: int
    species_in_validation: int = 2  # how many species are set aside for validation only
    seed: int = None  # random seed for partitioning
@dataclass(frozen=True)
class ExpressionPartitions:
    '''
    Holds the results of sorted stratification; it can also provide hold-out partitions
    '''
    data: EncodedFeatures
    X: pd.DataFrame
    Y: pd.DataFrame
    indexes: List[List[int]]
    validation_species: List[List[str]]
    n_hold_out: int = 0  # how many partitions are held out for validation checks
    seed: int = None  # random seed (useful for debugging)
    @property
    def has_hold_out(self) -> bool:
        return self.n_hold_out > 0

    def write(self, folder: Path, name: str):
        folder.mkdir(exist_ok=True)
        for i, px in enumerate(self.partitions_x):
            px.to_csv(folder / f"{name}_X_{str(i)}.tsv", sep="\t", index_label="reference_gene")
        for i, py in enumerate(self.partitions_y):
            py.to_csv(folder / f"{name}_Y_{str(i)}.tsv", sep="\t", index_label="reference_gene")
        if self.n_hold_out > 0:
            self.hold_out_x.to_csv(folder / f"{name}_X_hold_out.tsv", sep="\t", index_label="reference_gene")
            self.hold_out_y.to_csv(folder / f"{name}_Y_hold_out.tsv", sep="\t", index_label="reference_gene")
        return folder

    @cached_property
    def n_folds(self) -> int:
        return len(self.indexes)

    @cached_property
    def n_cv_folds(self):
        return self.n_folds - self.n_hold_out

    @cached_property
    def cv_indexes(self):
        return self.indexes[0:self.n_cv_folds]

    @cached_property
    def hold_out_partition_indexes(self) -> List[List[int]]:
        return self.indexes[self.n_cv_folds:len(self.indexes)]
    @cached_property
    def hold_out_merged_index(self) -> List[int]:
        '''
        The hold-out is required to check whether cross-validation makes sense when tuning parameters
        :return:
        '''
        return list(itertools.chain(*[pindex for pindex in self.hold_out_partition_indexes]))

    @cached_property
    def hold_out_species(self):
        return self.validation_species[self.n_cv_folds:len(self.indexes)]

    @cached_property
    def hold_out_merged_species(self):
        return list(itertools.chain(*self.hold_out_species))

    @cached_property
    def categorical_index(self):
        # temporarily computing them automatically
        return [ind for ind, c in enumerate(self.X.columns) if c in self.features.categorical]
    @property
    def folds(self):
        for ind in self.indexes:
            yield (ind, ind)

    @property
    def cv_folds(self):
        for ind in self.cv_indexes:
            yield (ind, ind)

    @cached_property
    def partitions_x(self) -> List[pd.DataFrame]:
        return [self.X.iloc[pindex] for pindex in self.cv_indexes]

    @cached_property
    def partitions_y(self) -> List[pd.DataFrame]:
        return [self.Y.iloc[pindex] for pindex in self.cv_indexes]

    @cached_property
    def cv_merged_index(self):
        return list(itertools.chain(*[pindex for pindex in self.cv_indexes]))

    @cached_property
    def cv_merged_x(self) -> pd.DataFrame:
        return self.X.iloc[self.cv_merged_index]

    @cached_property
    def cv_merged_y(self) -> pd.DataFrame:
        return self.Y.iloc[self.cv_merged_index]

    @cached_property
    def hold_out_x(self) -> pd.DataFrame:
        assert self.n_hold_out > 0, "n_hold_out is 0 partitions, so no hold-out data can be extracted!"
        return self.X.iloc[self.hold_out_merged_index]

    @cached_property
    def hold_out_y(self) -> pd.DataFrame:
        assert self.n_hold_out > 0, "n_hold_out is 0 partitions, so no hold-out data can be extracted!"
        return self.Y.iloc[self.hold_out_merged_index]

    @cached_property
    def species(self):
        return self.X['species'].values

    @cached_property
    def species_partitions(self):
        return [self.species[pindex] for pindex in self.indexes]

    @cached_property
    def X_T(self) -> pd.DataFrame:
        return self.X.T

    @property
    def features(self) -> FeatureSelection:
        return self.data.features
    def split_fold(self, i: int):
        X_train, y_train = self.fold_train(i)
        X_test = self.partitions_x[i]
        y_test = self.partitions_y[i]
        return X_train, X_test, y_train, y_test

    def fold_train(self, i: int):
        '''
        prepares the train data for the fold
        :param i: number of the partition
        :return: tuple with X and Y
        '''
        return pd.concat(self.partitions_x[:i] + self.partitions_x[i + 1:]), pd.concat(
            self.partitions_y[:i] + self.partitions_y[i + 1:])
    def __repr__(self):
        # to fix jupyter freeze (see https://github.com/ipython/ipython/issues/9771 )
        return self._repr_html_()

    def _repr_html_(self):
        return f"<table>" \
               f"<tr><th>partitions_X</th><th>partitions_Y</th></tr>" \
               f"<tr><td align='left'>[ {','.join([str(x.shape) for x in self.partitions_x])} ]</td>" \
               f"<td align='left'>[ {','.join([str(y.shape) for y in self.partitions_y])} ]</td></tr>" \
               f"<tr><th>show(X,10,10)</th><th>show(Y,10,10)</th></tr>" \
               f"<tr><td>{show(self.X, 10, 10)._repr_html_()}</td><td>{show(self.Y, 10, 10)._repr_html_()}</td></tr>" \
               f"</table>"
@dataclass(frozen=True)
class DataPartitioner(TransformerMixin):
    '''
    Partitions the data according to sorted stratification
    '''

    def fit(self, X, y=None) -> 'DataPartitioner':
        return self

    def transform(self, for_partition: Tuple[EncodedFeatures, PartitionParameters]) -> ExpressionPartitions:
        '''
        :param for_partition: tuple of (EncodedFeatures, PartitionParameters), where the parameters define the number of folds for sorted stratification
        :return: partitions
        '''
        assert isinstance(for_partition, Tuple) and len(
            for_partition) == 2, "the partitioner expects a tuple of exactly two elements: the data to partition and the partition parameters"
        encoded_data, partition_params = for_partition
        assert isinstance(encoded_data.samples, pd.DataFrame), "samples should be an extracted Pandas DataFrame with X and Y"
        if partition_params.seed is not None:
            import random
            random.seed(partition_params.seed)
            np.random.seed(partition_params.seed)
        return self.sorted_stratification(encoded_data, partition_params)
    def sorted_stratification(self, encodedFeatures: EncodedFeatures,
                              partition_params: PartitionParameters) -> ExpressionPartitions:
        '''
        :param encodedFeatures: encoded samples and feature selection to partition
        :param partition_params: number of folds, hold-outs and species to leave only in the validation set
        :return: ExpressionPartitions with sorted-stratification fold indexes
        '''
        df = encodedFeatures.samples
        features = encodedFeatures.features
        # sort samples by the prediction target so that neighbouring samples have similar target values
        X = df.sort_values(by=[features.to_predict], ascending=False).drop(columns=features.categorical,
                                                                           errors="ignore")

        k_sets_indexes = []
        species_for_validation = []
        if partition_params.species_in_validation > 0:
            all_species = X.species[~X["species"].isin(features.not_validated_species)].drop_duplicates().values
            df_index = X.index
            # TODO: looks overly complicated (too many accumulating variables, a refactoring is needed)
            already_selected_species = []
            for i in range(partition_params.n_folds):
                index_set = []
                choices = []
                # pick species_in_validation species that have not yet been assigned to any other fold
                for j in range(partition_params.species_in_validation):
                    choice = np.random.choice(all_species)
                    while choice in already_selected_species:
                        choice = np.random.choice(all_species)
                    choices.append(choice)
                    already_selected_species.append(choice)
                species_for_validation.append(choices)
                species = X['species'].values
                for j, c in enumerate(species):
                    if c in choices:
                        index_set.append(j)
                k_sets_indexes.append(index_set)

        # distribute the target-sorted samples round-robin over the folds, then spread the remainder one per fold
        partition_indexes = [[] for i in range(partition_params.n_folds)]
        i = 0
        index_of_sample = 0
        while i < (int(len(X) / partition_params.n_folds)):
            for j in range(partition_params.n_folds):
                partition_indexes[j].append((i * partition_params.n_folds) + j)
                index_of_sample = (i * partition_params.n_folds) + j
            i += 1
        index_of_sample += 1
        i = 0
        while index_of_sample < len(X):
            partition_indexes[i].append(index_of_sample)
            index_of_sample += 1
            i += 1

        # X also contains the Y column, which we separate into Y here
        X_sorted = features.prepare_for_training(X.drop([features.to_predict], axis=1))
        # Y was kept inside X under its prettified name from features, so we fix that in the partitions
        Y_sorted = features.prepare_for_training(X[[features.to_predict]])

        if partition_params.species_in_validation > 0:
            # each fold keeps only its own validation species and drops samples of species assigned to other folds
            for i, pindex in enumerate(partition_indexes):
                for j, sindex in enumerate(k_sets_indexes):
                    if i == j:
                        partition_indexes[i] = list(set(partition_indexes[i]).union(set(k_sets_indexes[j])))
                    else:
                        partition_indexes[i] = list(set(partition_indexes[i]).difference(set(k_sets_indexes[j])))

        return ExpressionPartitions(encodedFeatures, X_sorted, Y_sorted, partition_indexes, species_for_validation,
                                    n_hold_out=partition_params.n_hold_out, seed=partition_params.seed)
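
Usage sketch (not part of partition.py): a minimal illustration of how PartitionParameters, DataPartitioner and ExpressionPartitions fit together. It assumes the module is importable as yspecies.partition and that an EncodedFeatures instance is produced elsewhere in the pipeline; build_encoded_features() is a hypothetical placeholder, while everything else uses only the attributes and methods defined above.

from pathlib import Path

from yspecies.partition import DataPartitioner, PartitionParameters

# hypothetical helper: replace with however the pipeline builds an EncodedFeatures instance
encoded = build_encoded_features()

# five folds, one of them held out, two species reserved per fold for validation
params = PartitionParameters(n_folds=5, n_hold_out=1, species_in_validation=2, seed=42)

# fit_transform comes from TransformerMixin and simply calls fit().transform() on the tuple
partitions = DataPartitioner().fit_transform((encoded, params))

# iterate over the cross-validation folds (the hold-out partition is excluded from n_cv_folds)
for i in range(partitions.n_cv_folds):
    X_train, X_test, y_train, y_test = partitions.split_fold(i)
    # ... train and evaluate a model on the i-th fold here ...

# the merged hold-out data is only available when n_hold_out > 0
if partitions.has_hold_out:
    X_hold_out, y_hold_out = partitions.hold_out_x, partitions.hold_out_y

# optionally persist every partition as TSV files
partitions.write(Path("partitions"), name="expression")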