Register
Login
Resources
Docs Blog Datasets Glossary Case Studies Tutorials & Webinars
Product
Data Engine LLMs Platform Enterprise
Pricing Explore
Connect to our Discord channel

nn.py 9.7 KB

You have to be logged in to leave a comment. Sign In
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
  1. """
  2. Leras.
  3. like lighter keras.
  4. This is my lightweight neural network library written from scratch
  5. based on pure tensorflow without keras.
  6. Provides:
  7. + full freedom of tensorflow operations without keras model's restrictions
  8. + easy model operations like in PyTorch, but in graph mode (no eager execution)
  9. + convenient and understandable logic
  10. Reasons why we cannot import tensorflow or any tensorflow.sub modules right here:
  11. 1) program is changing env variables based on DeviceConfig before import tensorflow
  12. 2) multiprocesses will import tensorflow every spawn
  13. NCHW speed up training for 10-20%.
  14. """
  15. import os
  16. import sys
  17. import warnings
  18. warnings.simplefilter(action='ignore', category=FutureWarning)
  19. from pathlib import Path
  20. import numpy as np
  21. from core.interact import interact as io
  22. from .device import Devices
  23. class nn():
  24. current_DeviceConfig = None
  25. tf = None
  26. tf_sess = None
  27. tf_sess_config = None
  28. tf_default_device_name = None
  29. data_format = None
  30. conv2d_ch_axis = None
  31. conv2d_spatial_axes = None
  32. floatx = None
  33. @staticmethod
  34. def initialize(device_config=None, floatx="float32", data_format="NHWC"):
  35. if nn.tf is None:
  36. if device_config is None:
  37. device_config = nn.getCurrentDeviceConfig()
  38. nn.setCurrentDeviceConfig(device_config)
  39. # Manipulate environment variables before import tensorflow
  40. first_run = False
  41. if len(device_config.devices) != 0:
  42. if sys.platform[0:3] == 'win':
  43. # Windows specific env vars
  44. if all( [ x.name == device_config.devices[0].name for x in device_config.devices ] ):
  45. devices_str = "_" + device_config.devices[0].name.replace(' ','_')
  46. else:
  47. devices_str = ""
  48. for device in device_config.devices:
  49. devices_str += "_" + device.name.replace(' ','_')
  50. compute_cache_path = Path(os.environ['APPDATA']) / 'NVIDIA' / ('ComputeCache' + devices_str)
  51. if not compute_cache_path.exists():
  52. first_run = True
  53. compute_cache_path.mkdir(parents=True, exist_ok=True)
  54. os.environ['CUDA_CACHE_PATH'] = str(compute_cache_path)
  55. if first_run:
  56. io.log_info("Caching GPU kernels...")
  57. import tensorflow
  58. tf_version = tensorflow.version.VERSION
  59. #if tf_version is None:
  60. # tf_version = tensorflow.version.GIT_VERSION
  61. if tf_version[0] == 'v':
  62. tf_version = tf_version[1:]
  63. if tf_version[0] == '2':
  64. tf = tensorflow.compat.v1
  65. else:
  66. tf = tensorflow
  67. import logging
  68. # Disable tensorflow warnings
  69. tf_logger = logging.getLogger('tensorflow')
  70. tf_logger.setLevel(logging.ERROR)
  71. if tf_version[0] == '2':
  72. tf.disable_v2_behavior()
  73. nn.tf = tf
  74. # Initialize framework
  75. import core.leras.ops
  76. import core.leras.layers
  77. import core.leras.initializers
  78. import core.leras.optimizers
  79. import core.leras.models
  80. import core.leras.archis
  81. # Configure tensorflow session-config
  82. if len(device_config.devices) == 0:
  83. config = tf.ConfigProto(device_count={'GPU': 0})
  84. nn.tf_default_device_name = '/CPU:0'
  85. else:
  86. nn.tf_default_device_name = f'/{device_config.devices[0].tf_dev_type}:0'
  87. config = tf.ConfigProto()
  88. config.gpu_options.visible_device_list = ','.join([str(device.index) for device in device_config.devices])
  89. config.gpu_options.force_gpu_compatible = True
  90. config.gpu_options.allow_growth = True
  91. nn.tf_sess_config = config
  92. if nn.tf_sess is None:
  93. nn.tf_sess = tf.Session(config=nn.tf_sess_config)
  94. if floatx == "float32":
  95. floatx = nn.tf.float32
  96. elif floatx == "float16":
  97. floatx = nn.tf.float16
  98. else:
  99. raise ValueError(f"unsupported floatx {floatx}")
  100. nn.set_floatx(floatx)
  101. nn.set_data_format(data_format)
  102. @staticmethod
  103. def initialize_main_env():
  104. Devices.initialize_main_env()
  105. @staticmethod
  106. def set_floatx(tf_dtype):
  107. """
  108. set default float type for all layers when dtype is None for them
  109. """
  110. nn.floatx = tf_dtype
  111. @staticmethod
  112. def set_data_format(data_format):
  113. if data_format != "NHWC" and data_format != "NCHW":
  114. raise ValueError(f"unsupported data_format {data_format}")
  115. nn.data_format = data_format
  116. if data_format == "NHWC":
  117. nn.conv2d_ch_axis = 3
  118. nn.conv2d_spatial_axes = [1,2]
  119. elif data_format == "NCHW":
  120. nn.conv2d_ch_axis = 1
  121. nn.conv2d_spatial_axes = [2,3]
  122. @staticmethod
  123. def get4Dshape ( w, h, c ):
  124. """
  125. returns 4D shape based on current data_format
  126. """
  127. if nn.data_format == "NHWC":
  128. return (None,h,w,c)
  129. else:
  130. return (None,c,h,w)
  131. @staticmethod
  132. def to_data_format( x, to_data_format, from_data_format):
  133. if to_data_format == from_data_format:
  134. return x
  135. if to_data_format == "NHWC":
  136. return np.transpose(x, (0,2,3,1) )
  137. elif to_data_format == "NCHW":
  138. return np.transpose(x, (0,3,1,2) )
  139. else:
  140. raise ValueError(f"unsupported to_data_format {to_data_format}")
  141. @staticmethod
  142. def getCurrentDeviceConfig():
  143. if nn.current_DeviceConfig is None:
  144. nn.current_DeviceConfig = DeviceConfig.BestGPU()
  145. return nn.current_DeviceConfig
  146. @staticmethod
  147. def setCurrentDeviceConfig(device_config):
  148. nn.current_DeviceConfig = device_config
  149. @staticmethod
  150. def reset_session():
  151. if nn.tf is not None:
  152. if nn.tf_sess is not None:
  153. nn.tf.reset_default_graph()
  154. nn.tf_sess.close()
  155. nn.tf_sess = nn.tf.Session(config=nn.tf_sess_config)
  156. @staticmethod
  157. def close_session():
  158. if nn.tf_sess is not None:
  159. nn.tf.reset_default_graph()
  160. nn.tf_sess.close()
  161. nn.tf_sess = None
  162. @staticmethod
  163. def ask_choose_device_idxs(choose_only_one=False, allow_cpu=True, suggest_best_multi_gpu=False, suggest_all_gpu=False):
  164. devices = Devices.getDevices()
  165. if len(devices) == 0:
  166. return []
  167. all_devices_indexes = [device.index for device in devices]
  168. if choose_only_one:
  169. suggest_best_multi_gpu = False
  170. suggest_all_gpu = False
  171. if suggest_all_gpu:
  172. best_device_indexes = all_devices_indexes
  173. elif suggest_best_multi_gpu:
  174. best_device_indexes = [device.index for device in devices.get_equal_devices(devices.get_best_device()) ]
  175. else:
  176. best_device_indexes = [ devices.get_best_device().index ]
  177. best_device_indexes = ",".join([str(x) for x in best_device_indexes])
  178. io.log_info ("")
  179. if choose_only_one:
  180. io.log_info ("Choose one GPU idx.")
  181. else:
  182. io.log_info ("Choose one or several GPU idxs (separated by comma).")
  183. io.log_info ("")
  184. if allow_cpu:
  185. io.log_info ("[CPU] : CPU")
  186. for device in devices:
  187. io.log_info (f" [{device.index}] : {device.name}")
  188. io.log_info ("")
  189. while True:
  190. try:
  191. if choose_only_one:
  192. choosed_idxs = io.input_str("Which GPU index to choose?", best_device_indexes)
  193. else:
  194. choosed_idxs = io.input_str("Which GPU indexes to choose?", best_device_indexes)
  195. if allow_cpu and choosed_idxs.lower() == "cpu":
  196. choosed_idxs = []
  197. break
  198. choosed_idxs = [ int(x) for x in choosed_idxs.split(',') ]
  199. if choose_only_one:
  200. if len(choosed_idxs) == 1:
  201. break
  202. else:
  203. if all( [idx in all_devices_indexes for idx in choosed_idxs] ):
  204. break
  205. except:
  206. pass
  207. io.log_info ("")
  208. return choosed_idxs
  209. class DeviceConfig():
  210. @staticmethod
  211. def ask_choose_device(*args, **kwargs):
  212. return nn.DeviceConfig.GPUIndexes( nn.ask_choose_device_idxs(*args,**kwargs) )
  213. def __init__ (self, devices=None):
  214. devices = devices or []
  215. if not isinstance(devices, Devices):
  216. devices = Devices(devices)
  217. self.devices = devices
  218. self.cpu_only = len(devices) == 0
  219. @staticmethod
  220. def BestGPU():
  221. devices = Devices.getDevices()
  222. if len(devices) == 0:
  223. return nn.DeviceConfig.CPU()
  224. return nn.DeviceConfig([devices.get_best_device()])
  225. @staticmethod
  226. def WorstGPU():
  227. devices = Devices.getDevices()
  228. if len(devices) == 0:
  229. return nn.DeviceConfig.CPU()
  230. return nn.DeviceConfig([devices.get_worst_device()])
  231. @staticmethod
  232. def GPUIndexes(indexes):
  233. if len(indexes) != 0:
  234. devices = Devices.getDevices().get_devices_from_index_list(indexes)
  235. else:
  236. devices = []
  237. return nn.DeviceConfig(devices)
  238. @staticmethod
  239. def CPU():
  240. return nn.DeviceConfig([])
Tip!

Press p to see the previous file, or n to see the next file

Comments

Loading...