
training.py

import dagshub
import os
import pandas as pd
import yaml
import re
import numpy as np
import joblib
from scipy.sparse import dia_matrix
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import (
    roc_auc_score,
    average_precision_score,
    accuracy_score,
    precision_score,
    recall_score,
    f1_score,
)

with open(r"./general_params.yaml") as f:
    params = yaml.safe_load(f)

with open(r"./training_params.yaml") as f:
    training_params = yaml.safe_load(f)

NUM_COL_NAMES = ["title_len", "body_len", "hour", "minute", "dayofweek", "dayofyear"]
CAT_COL_NAMES = [
    "has_thumbnail",
    "flair_Clickbait",
    "flair_Discussion",
    "flair_Inaccurate",
    "flair_Misleading",
    "flair_News",
    "flair_None",
    "flair_Project",
    "flair_Research",
    "flair_Shameless Self Promo",
]

CHUNK_SIZE = params["chunk_size"]
TARGET_LABEL = params["target_col"]

MODEL_TYPE_TEXT = "model_text"
MODEL_TYPE_NUM_CAT = "model_num_cat"
MODEL_TYPE_OTHER = ""
# Pick the model type from the training params: text columns take priority,
# then numeric/categorical columns, otherwise fall through to "other".
MODEL_TYPE = (
    MODEL_TYPE_TEXT
    if training_params["use_text_cols"]
    else MODEL_TYPE_NUM_CAT
    if training_params["use_number_category_cols"]
    else MODEL_TYPE_OTHER
)

local_path = "."
train_df_path = "rML-train.csv"
tfidf_path = "models/tfidf.pkl"
# The original assigned all three paths to "models/tfidf.pkl", so saving the
# classifier would overwrite the vectorizer. Distinct file names (assumed here)
# avoid that collision.
clf_tfidf_path = "models/clf_tfidf.pkl"
clf_num_cat_path = "models/clf_num_cat.pkl"

# ----- Helper Functions -----
# A partial fit for the TfidfVectorizer courtesy @maxymoo on Stack Overflow
# https://stackoverflow.com/questions/39109743/adding-new-text-to-sklearn-tfidif-vectorizer-python/39114555#39114555
def partial_fit(self, X):
    # If this is the first iteration, use regular fit
    if not hasattr(self, "is_initialized"):
        self.fit(X)
        self.n_docs = len(X)
        self.is_initialized = True
    else:
        max_idx = max(self.vocabulary_.values())
        for a in X:
            # update vocabulary_
            if self.lowercase:
                a = str(a).lower()
            tokens = re.findall(self.token_pattern, a)
            for w in tokens:
                if w not in self.vocabulary_:
                    max_idx += 1
                    self.vocabulary_[w] = max_idx

            # update idf_: recover raw document frequencies from the stored
            # idf_ values, count the new document, then recompute idf_
            df = (self.n_docs + self.smooth_idf) / np.exp(
                self.idf_ - 1
            ) - self.smooth_idf
            self.n_docs += 1
            df.resize(len(self.vocabulary_))
            for w in tokens:
                df[self.vocabulary_[w]] += 1
            idf = np.log((self.n_docs + self.smooth_idf) / (df + self.smooth_idf)) + 1
            self._tfidf._idf_diag = dia_matrix((idf, 0), shape=(len(idf), len(idf)))
# ----- End Helper Functions -----
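# A minimal sketch of how the monkey-patched partial_fit behaves (the strings
# are hypothetical and this is not part of the training flow): the first call
# delegates to fit(), later calls grow vocabulary_ and recompute the idf_
# diagonal in place.
#
#     vec = TfidfVectorizer(max_features=25000)
#     TfidfVectorizer.partial_fit = partial_fit
#     vec.partial_fit(["the first batch of posts"])
#     vec.partial_fit(["a later batch with unseen words"])
#     X = vec.transform(["unseen words now get tf-idf weights too"])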
class TextModel:
    # remote_wfs is passed in explicitly; the original read it from a global
    # that only exists when the script runs as __main__.
    def __init__(self, remote_wfs, random_state=42):
        # note: scikit-learn >= 1.3 renames this loss to "log_loss"
        self.clf = SGDClassifier(loss="log", random_state=random_state)

        print("Generate TFIDF features...")
        TfidfVectorizer.partial_fit = partial_fit
        self.tfidf = TfidfVectorizer(max_features=25000)
        for i, chunk in enumerate(
            pd.read_csv(os.path.join(remote_wfs, train_df_path), chunksize=CHUNK_SIZE)
        ):
            print(f"Training on chunk {i+1}...")
            self.tfidf.partial_fit(chunk["title_and_body"])
        print("TFIDF feature matrix created!")

    def train_on_chunk(self, chunk):
        df_y = chunk[TARGET_LABEL]
        tfidf_X = self.tfidf.transform(chunk["title_and_body"].values.astype("U"))
        self.clf.partial_fit(tfidf_X, df_y, classes=np.array([0, 1]))

    def save_model(self):
        joblib.dump(self.tfidf, os.path.join(local_path, tfidf_path))
        joblib.dump(self.clf, os.path.join(local_path, clf_tfidf_path))
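# A minimal sketch of reloading the saved artifacts for inference (file names
# assume the defaults above; the input string is hypothetical):
#
#     tfidf = joblib.load("models/tfidf.pkl")
#     clf = joblib.load("models/clf_tfidf.pkl")
#     proba = clf.predict_proba(tfidf.transform(["post title and body text"]))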
def get_remote_gs_wfs():
    print("Retrieving location of remote working file system...")
    stream = os.popen("dvc remote list --local")
    output = stream.read()
    # keep only the URL portion of the first remote line
    remote_wfs_loc = output.split("\t")[1].split("\n")[0]
    return remote_wfs_loc
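# The parsing above assumes `dvc remote list` prints one tab-separated
# "name<TAB>url" pair per line, e.g. (hypothetical remote):
#
#     origin	gs://my-bucket/reddit-data
#
# so remote_wfs_loc becomes "gs://my-bucket/reddit-data".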
def load_and_train(remote_wfs, model_type=None, random_state=42):
    print("Initializing models...")
    if model_type == MODEL_TYPE_TEXT:
        model = TextModel(remote_wfs, random_state=random_state)
    else:
        # TODO
        return

    print("Training model...")
    for i, chunk in enumerate(
        pd.read_csv(os.path.join(remote_wfs, train_df_path), chunksize=CHUNK_SIZE)
    ):
        print(f"Training on chunk {i+1}...")
        model.train_on_chunk(chunk)

    print("Saving models locally...")
    model.save_model()


if __name__ == "__main__":
    remote_wfs = get_remote_gs_wfs()
    load_and_train(remote_wfs, MODEL_TYPE)
    print("Loading and training done!")
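# Typical invocation, assuming a DVC remote is configured and rML-train.csv
# lives on it:
#
#     python training.py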