sg_model_utils.py

import os
import sys
import socket
import time
from multiprocessing import Process
from pathlib import Path
from typing import Tuple, Union

import torch
from torch.utils.tensorboard import SummaryWriter

from super_gradients.training.exceptions.dataset_exceptions import UnsupportedBatchItemsFormat


# TODO: These utils should move to sg_model package as internal (private) helper functions
def try_port(port):
    """
    try_port - Helper method for TensorBoard port binding. Checks whether the given port is free on localhost.
    :param port: the port to check
    :return: True if the port is available, False otherwise
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    is_port_available = False
    try:
        sock.bind(("localhost", port))
        is_port_available = True
    except Exception as ex:
        print('Port ' + str(port) + ' is in use: ' + str(ex))

    sock.close()
    return is_port_available
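
# A minimal usage sketch, kept as a comment (port 6006 is just an example value):
#     if try_port(6006):
#         print('Port 6006 is free')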


def launch_tensorboard_process(checkpoints_dir_path: str, sleep_postpone: bool = True, port: int = None) -> Tuple[Process, int]:
    """
    launch_tensorboard_process - Launches a TensorBoard process in the background. The default behavior is to scan
                                 ports 6006-6015 and use the first free one, unless a port is explicitly defined
                                 by the user.
    :param checkpoints_dir_path: path to the checkpoints directory; its parent is used as the TensorBoard logdir
    :param sleep_postpone: if True, sleep briefly after launching to let the TensorBoard process start
    :param port: optional explicit port to bind instead of scanning
    :return: tuple of (tb process, port), or (None, -1) if no port could be bound
    """
    logdir_path = str(Path(checkpoints_dir_path).parent.absolute())
    tb_cmd = 'tensorboard --logdir=' + logdir_path + ' --bind_all'
    if port is not None:
        tb_ports = [port]
    else:
        tb_ports = range(6006, 6016)

    for tb_port in tb_ports:
        if not try_port(tb_port):
            continue
        print('Starting Tensor-Board process on port: ' + str(tb_port))
        tensor_board_process = Process(target=os.system, args=(tb_cmd + ' --port=' + str(tb_port),))
        tensor_board_process.daemon = True
        tensor_board_process.start()

        # LET THE TENSORBOARD PROCESS START
        if sleep_postpone:
            time.sleep(3)
        return tensor_board_process, tb_port

    # RETURNING IRRELEVANT VALUES
    print('Failed to initialize Tensor-Board process on ports: ' + ', '.join(map(str, tb_ports)))
    return None, -1
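
# A minimal usage sketch (the checkpoints path is hypothetical); the (None, -1)
# return value signals that no port in the scanned range could be bound:
#     tb_process, tb_port = launch_tensorboard_process('/path/to/checkpoints/my_experiment')
#     if tb_process is None:
#         print('Could not start TensorBoard')
#     else:
#         print('TensorBoard serving on port', tb_port)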


def init_summary_writer(tb_dir, checkpoint_loaded, user_prompt=False):
    """Remove previous tensorboard files from the directory and create a SummaryWriter that logs into it"""
    # If the training is from scratch, walk through the destination folder and delete existing tensorboard logs
    user = ''
    if not checkpoint_loaded:
        for filename in os.listdir(tb_dir):
            if 'events' in filename:
                if not user_prompt:
                    print('"{}" will not be deleted'.format(filename))
                    continue

                while True:
                    # Verify with the user before deleting old tensorboard files (a previous y/n answer is reused)
                    user = input('\nOLDER TENSORBOARD FILES EXIST IN EXPERIMENT FOLDER:\n"{}"\n'
                                 'DO YOU WANT TO DELETE THEM? [y/n]'
                                 .format(filename)) if (user != 'n' and user != 'y') else user
                    if user == 'y':
                        os.remove('{}/{}'.format(tb_dir, filename))
                        print('DELETED: {}!'.format(filename))
                        break
                    elif user == 'n':
                        print('"{}" will not be deleted'.format(filename))
                        break
                    print('Unknown answer...')

    return SummaryWriter(tb_dir)
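
# A minimal usage sketch (the directory is hypothetical); with the default
# user_prompt=False, existing event files are kept and only reported:
#     writer = init_summary_writer('/path/to/experiment/tb', checkpoint_loaded=False)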


def add_log_to_file(filename, results_titles_list, results_values_list, epoch, max_epochs):
    """Add a message to the log file"""
    # Note: opening and closing the file every time is inefficient; it is done this way for experimental purposes
    with open(filename, 'a') as f:
        f.write('\nEpoch (%d/%d) - ' % (epoch, max_epochs))
        for result_title, result_value in zip(results_titles_list, results_values_list):
            if isinstance(result_value, torch.Tensor):
                result_value = result_value.item()
            f.write(result_title + ': ' + str(result_value) + '\t')
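
# A minimal usage sketch (file name and metric values are made up for illustration):
#     add_log_to_file('train.log', ['Loss', 'Accuracy'], [0.35, 0.91], epoch=3, max_epochs=100)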


def write_training_results(writer, results_titles_list, results_values_list, epoch):
    """Stores the training and validation loss and accuracy for the current epoch in a tensorboard file"""
    for res_key, res_val in zip(results_titles_list, results_values_list):
        # USE ONLY LOWER-CASE LETTERS AND REPLACE SPACES WITH '_' TO AVOID MANY TITLES FOR THE SAME KEY
        corrected_res_key = res_key.lower().replace(' ', '_')
        writer.add_scalar(corrected_res_key, res_val, epoch)
    writer.flush()
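
# A minimal usage sketch (titles and values are made up); a title like 'Train Loss'
# is normalized to the scalar tag 'train_loss' before being written:
#     write_training_results(writer, ['Train Loss', 'Valid Accuracy'], [0.35, 0.91], epoch=3)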


def write_hpms(writer, hpmstructs=[], special_conf={}):
    """Stores the training and dataset hyper params in the tensorboard file"""
    hpm_string = ""
    for hpm in hpmstructs:
        for key, val in hpm.__dict__.items():
            hpm_string += '{}: {} \n '.format(key, val)
    for key, val in special_conf.items():
        hpm_string += '{}: {} \n '.format(key, val)
    writer.add_text("Hyper_parameters", hpm_string)
    writer.flush()
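
# A minimal usage sketch: hpmstructs is expected to hold objects whose hyper-params
# live in their __dict__ (TrainCfg below is a hypothetical example class):
#     class TrainCfg:
#         def __init__(self):
#             self.lr = 0.1
#             self.batch_size = 64
#     write_hpms(writer, hpmstructs=[TrainCfg()], special_conf={'seed': 42})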


# TODO: This should probably move into datasets/datasets_utils.py?
def unpack_batch_items(batch_items: Union[tuple, torch.Tensor]):
    """
    Adds support for unpacking batch items in the train/validation loop.

    @param batch_items: (Union[tuple, torch.Tensor]) returned by the data loader, which is expected to be in one of
        the following formats:
            1. torch.Tensor or tuple, s.t inputs = batch_items[0], targets = batch_items[1] and len(batch_items) = 2
            2. tuple: (inputs, targets, additional_batch_items)
        where inputs are fed to the network, targets are their corresponding labels and additional_batch_items is a
        dictionary (format {additional_batch_item_i_name: additional_batch_item_i ...}) which can be accessed through
        the phase context under the attribute additional_batch_item_i_name, using a phase callback.
    @return: inputs, target, additional_batch_items
    """
    additional_batch_items = {}
    if len(batch_items) == 2:
        inputs, target = batch_items
    elif len(batch_items) == 3:
        inputs, target, additional_batch_items = batch_items
    else:
        raise UnsupportedBatchItemsFormat()

    return inputs, target, additional_batch_items
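
# A minimal usage sketch covering both supported formats (the tensors and the
# 'sample_ids' key are dummies used only for illustration):
#     inputs, target, extras = unpack_batch_items((torch.zeros(4, 3), torch.zeros(4)))
#     inputs, target, extras = unpack_batch_items(
#         (torch.zeros(4, 3), torch.zeros(4), {'sample_ids': [0, 1, 2, 3]}))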


def log_uncaught_exceptions(logger):
    """
    Makes the logger log uncaught exceptions
    @param logger: logging.Logger
    @return: None
    """
    def handle_exception(exc_type, exc_value, exc_traceback):
        # Let KeyboardInterrupt fall through to the default hook so Ctrl+C still exits quietly
        if issubclass(exc_type, KeyboardInterrupt):
            sys.__excepthook__(exc_type, exc_value, exc_traceback)
            return
        logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))

    sys.excepthook = handle_exception
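
# A minimal usage sketch: after installing the hook, any uncaught exception is routed
# to the given logger before the interpreter exits (the logger name is arbitrary):
#     import logging
#     logger = logging.getLogger('sg_trainer')
#     log_uncaught_exceptions(logger)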