tasks.py
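Inference wrappers for the UNIKUD models: KtivMaleTask converts vocalized (nikud-annotated) Hebrew into unvocalized ktiv male spelling by predicting where to insert the letters yud and vav, and NikudTask adds nikud marks to plain Hebrew text by decoding per-character vowel, dagesh, shin/sin-dot, and deletion probabilities.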

from hebrew_utils import NIKUD, YUD, VAV, ABG, N_VOWELS, idx2chr
from tqdm.auto import tqdm
import numpy as np
import torch

class KtivMaleTask:
    def __init__(self, tokenizer, model, device='cpu'):
        self.tokenizer = tokenizer
        self.model = model.to(device)
        self.device = device

    def _nikud2male_word(self, word, logits, sample=False, sample_thresh=0.1):
        if sample:
            probs = logits.softmax(axis=-1).cpu().numpy()
            # remove probabilities under sample_thresh and normalize
            # (fixed: the threshold was hardcoded as 0.1, ignoring the sample_thresh parameter)
            probs = np.where(probs < sample_thresh, 0, probs)
            probs /= probs.sum(axis=-1)[:, None]
            output = ''
            # probs[0] corresponds to the [CLS] token, so align characters with probs[1:]
            for c, P in zip(word, probs[1:]):
                if c not in NIKUD:
                    # sample an insertion (nothing, yud, or vav) before this character
                    output += np.random.choice(['', YUD, VAV], p=P)
                    output += c
            return output
        else:
            preds = logits.argmax(axis=-1).cpu().numpy()
            output = ''
            for c, L in zip(word, preds[1:]):
                if L == 1:
                    output += YUD
                if L == 2:
                    output += VAV
                if c not in NIKUD:  # drop nikud characters from the output
                    output += c
            return output

    def _nikud2male_batch(self, batch, **kwargs):
        # if all words in batch are too small, the model cannot process them, so just return them unchanged
        if all(len(word) <= 1 for word in batch):
            for word in batch:
                yield ''.join([c for c in word if c not in NIKUD])
        else:
            X = self.tokenizer(batch, return_tensors='pt', padding=True, truncation=True).to(self.device)
            logits = self.model(**X).logits.detach()
            for i, word in enumerate(batch):
                yield self._nikud2male_word(word, logits[i], **kwargs)

    def nikud2male(self, text, split=False, pbar=False, sample=False, sample_thresh=0.1, batch_size=64):
        """
        text: Hebrew text with nikud
        returns: Hebrew text in ktiv male without nikud
        """
        words = text.split(' ') if split else [text]
        # group words into batches of at most batch_size
        batches = [[]]
        for word in words:
            if len(batches[-1]) < batch_size:
                batches[-1] += [word]
            else:
                batches += [[word]]
        outputs = [
            out
            for batch in (tqdm(batches) if pbar else batches)
            for out in self._nikud2male_batch(batch, sample=sample, sample_thresh=sample_thresh)
        ]
        return ' '.join(outputs)
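
# Label semantics, as implied by the decoding in _nikud2male_word: for each
# input character the model predicts label 0 (insert nothing), 1 (insert yud),
# or 2 (insert vav) immediately before that character; the nikud characters
# themselves are dropped, yielding plain unvocalized ktiv male text.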

class NikudTask:
    def __init__(self, tokenizer, model, device='cpu', max_len=2046):
        self.tokenizer = tokenizer
        self.model = model.to(device)
        self.device = device
        self.max_len = max_len
        # Note: max_len is 2 less than the model's input length of 2048,
        # to account for the BOS and EOS tokens

    def _decode_nikud_probs(self, probs, d_thresh=0.5, v_thresh=0.5, o_thresh=0.5):
        # probs: N_TARGET_LABELS probabilities of nikud for a single character,
        # with deletion as the last entry.
        # Note: the first N_VOWELS entries are mutually exclusive vowels,
        # the next are dagesh, shin dot, and sin dot,
        # and the last is the deletion flag.
        vowel_probs = probs[:N_VOWELS]
        other_probs = probs[N_VOWELS:-1]
        del_prob = probs[-1]
        maxvow = vowel_probs.max().item()
        argmaxvow = vowel_probs.argmax().item()
        if del_prob > d_thresh:
            return None  # special symbol for deletion
        out = ''
        if maxvow > v_thresh:
            out += idx2chr[argmaxvow]
        for i, p in enumerate(other_probs):
            if p > o_thresh:
                out += idx2chr[N_VOWELS + i]
        return out

    def add_nikud(self, text, **kwargs):
        assert len(text) <= self.max_len, f'Input text cannot be longer than {self.max_len} characters.'
        X = self.tokenizer([text], return_tensors='pt').to(self.device)
        logits = self.model(**X).logits.detach()[0]
        probs = torch.sigmoid(logits)  # independent sigmoids: multi-label prediction
        output = ''
        for i, char in enumerate(text):
            output += char
            if char in ABG:
                # probs[0] corresponds to the [CLS] token, hence the i + 1 offset
                char_probs = probs[i + 1]
                decoded = self._decode_nikud_probs(char_probs, **kwargs)
                if decoded is None and len(output) > 0:
                    output = output[:-1]  # deletion: remove the character just added
                else:
                    output += decoded
        return output
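
# Worked decoding example (illustrative numbers, not real model output): with
# the default thresholds of 0.5, a probability vector whose vowel block peaks
# at 0.8 and whose dagesh entry is 0.7, with a deletion flag of 0.1, decodes
# to that vowel plus a dagesh, both appended after the base letter; with a
# deletion flag of 0.9 the base letter itself is removed from the output.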

if __name__ == '__main__':
    from models import KtivMaleModel, UnikudModel
    from transformers import CanineTokenizer

    print('Loading tokenizer')
    tokenizer = CanineTokenizer.from_pretrained("google/canine-c")
    print('Loading KM model')
    model = KtivMaleModel.from_pretrained("google/canine-c", num_labels=3)
    print('Loading KM task')
    km_task = KtivMaleTask(tokenizer, model)
    print('KM task loaded')

    # vocalized sample: "Spring is on its way, and with it only happiness and joy"
    text = 'אָבִיב הוֹלֵךְ וּבָא אִתּוֹ רַק אֹשֶׁר וְשִׂמְחָה'
    print(text)
    print(km_task.nikud2male(text, split=True, pbar=True))

    print('Loading UNIKUD model')
    model = UnikudModel.from_pretrained("google/canine-c")
    print('Loading nikud task')
    n_task = NikudTask(tokenizer, model)
    print('Nikud task loaded')

    # unvocalized sample: "This is an example of unvocalized Hebrew text"
    text = 'זאת דוגמא של טקסט לא מנוקד בעברית'
    print(text)
    print(n_task.add_nikud(text))
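
Note that the demo above loads the base google/canine-c checkpoint into both task models, so their prediction heads are presumably freshly initialized (transformers will warn about newly initialized weights) and the printed outputs will not be meaningful. A minimal usage sketch with trained weights; the checkpoint path below is hypothetical, so substitute wherever your trained UNIKUD weights live:

    from transformers import CanineTokenizer
    from models import UnikudModel
    from tasks import NikudTask

    tokenizer = CanineTokenizer.from_pretrained("google/canine-c")
    model = UnikudModel.from_pretrained("./unikud-weights")  # hypothetical local checkpoint path
    task = NikudTask(tokenizer, model, device="cuda")  # or device="cpu"
    print(task.add_nikud("שלום עולם"))  # unvocalized "hello world"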