interactive.py
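This is fairseq's interactive translation script: it loads a trained model ensemble, reads source sentences from stdin, and prints the n-best translations with scores and alignments. A typical invocation might look like `python interactive.py data-bin/wmt14.en-fr --path checkpoints/checkpoint_best.pt --beam 5 --nbest 5 --buffer-size 16`; the data directory and checkpoint path here are placeholders, while the flags correspond to the options this script parses and asserts on below.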

#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import numpy as np
import sys
import torch

from collections import namedtuple
from torch.autograd import Variable

from fairseq import options, tokenizer, utils
from fairseq.data import LanguagePairDataset
from fairseq.sequence_generator import SequenceGenerator

Batch = namedtuple('Batch', 'srcs tokens lengths')
Translation = namedtuple('Translation', 'src_str hypos alignments')

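# Read stdin into lists of up to buffer_size stripped lines, yielding each
# full buffer (and any trailing partial buffer) so that inputs can be
# translated in batches rather than one sentence at a time.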
def buffered_read(buffer_size):
    buffer = []
    for src_str in sys.stdin:
        buffer.append(src_str.strip())
        if len(buffer) >= buffer_size:
            yield buffer
            buffer = []

    if len(buffer) > 0:
        yield buffer

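# Tokenize the buffered lines, sort them by length, and split them into
# batches of at most batch_size; the original indices are yielded alongside
# each batch so the caller can restore input order.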
def make_batches(lines, batch_size, src_dict):
    tokens = [
        tokenizer.Tokenizer.tokenize(src_str, src_dict, add_if_not_exist=False).long()
        for src_str in lines
    ]
    lengths = [t.numel() for t in tokens]
    indices = np.argsort(lengths)
    num_batches = int(np.ceil(len(indices) / batch_size))
    batches = np.array_split(indices, num_batches)
    for batch_idxs in batches:
        batch_toks = [tokens[i] for i in batch_idxs]
        batch_toks = LanguagePairDataset.collate_tokens(
            batch_toks, src_dict.pad(), src_dict.eos(),
            LanguagePairDataset.LEFT_PAD_SOURCE,
            move_eos_to_beginning=False)
        yield Batch(
            srcs=[lines[i] for i in batch_idxs],
            tokens=batch_toks,
            lengths=tokens[0].new([lengths[i] for i in batch_idxs]),
        ), batch_idxs

def main(args):
    print(args)

    assert not args.sampling or args.nbest == args.beam, \
        '--sampling requires --nbest to be equal to --beam'
    assert not args.max_sentences or args.max_sentences <= args.buffer_size, \
        '--max-sentences/--batch-size cannot be larger than --buffer-size'

    if args.buffer_size < 1:
        args.buffer_size = 1

    use_cuda = torch.cuda.is_available() and not args.cpu

    # Load ensemble
    print('| loading model(s) from {}'.format(', '.join(args.path)))
    models, model_args = utils.load_ensemble_for_inference(args.path, data_dir=args.data)
    src_dict, dst_dict = models[0].src_dict, models[0].dst_dict

    print('| [{}] dictionary: {} types'.format(model_args.source_lang, len(src_dict)))
    print('| [{}] dictionary: {} types'.format(model_args.target_lang, len(dst_dict)))

    # Optimize ensemble for generation
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
        )

    # Initialize generator
    translator = SequenceGenerator(
        models, beam_size=args.beam, stop_early=(not args.no_early_stop),
        normalize_scores=(not args.unnormalized), len_penalty=args.lenpen,
        unk_penalty=args.unkpen, sampling=args.sampling)
    if use_cuda:
        translator.cuda()

    # Load alignment dictionary for unknown word replacement
    # (None if no unknown word replacement, empty if no path to align dictionary)
    align_dict = utils.load_align_dict(args.replace_unk)

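    # Format one source sentence and its n-best hypotheses as O (original),
    # H (hypothesis), and A (alignment) output lines.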
    def make_result(src_str, hypos):
        result = Translation(
            src_str='O\t{}'.format(src_str),
            hypos=[],
            alignments=[],
        )

        # Process top predictions
        for hypo in hypos[:min(len(hypos), args.nbest)]:
            hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                hypo_tokens=hypo['tokens'].int().cpu(),
                src_str=src_str,
                alignment=hypo['alignment'].int().cpu(),
                align_dict=align_dict,
                dst_dict=dst_dict,
                remove_bpe=args.remove_bpe,
            )
            result.hypos.append('H\t{}\t{}'.format(hypo['score'], hypo_str))
            result.alignments.append('A\t{}'.format(' '.join(str(utils.item(x)) for x in alignment)))
        return result

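    # Translate one collated batch, moving tensors to the GPU when available;
    # maxlen grows linearly with the source length.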
    def process_batch(batch):
        tokens = batch.tokens
        lengths = batch.lengths
        if use_cuda:
            tokens = tokens.cuda()
            lengths = lengths.cuda()

        translations = translator.generate(
            Variable(tokens),
            Variable(lengths),
            maxlen=int(args.max_len_a * tokens.size(1) + args.max_len_b),
        )

        return [make_result(batch.srcs[i], t) for i, t in enumerate(translations)]

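    # Main loop: read stdin in buffers, translate batch by batch, then use
    # the collected indices to print results in the original input order.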
    if args.buffer_size > 1:
        print('| Sentence buffer size:', args.buffer_size)
    print('| Type the input sentence and press return:')
    for inputs in buffered_read(args.buffer_size):
        indices = []
        results = []
        for batch, batch_indices in make_batches(inputs, max(1, args.max_sentences or 1), src_dict):
            indices.extend(batch_indices)
            results += process_batch(batch)

        for i in np.argsort(indices):
            result = results[i]
            print(result.src_str)
            for hypo, align in zip(result.hypos, result.alignments):
                print(hypo)
                print(align)


if __name__ == '__main__':
    parser = options.get_generation_parser(interactive=True)
    args = parser.parse_args()
    main(args)
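The batching strategy in make_batches is worth calling out: sentences are sorted by length before splitting so that each batch pads to a similar width, and the original indices travel alongside each batch so main() can print results in input order via np.argsort(indices). Below is a minimal, self-contained sketch of that sort-split-restore pattern, using plain NumPy with uppercasing standing in for translation:

import numpy as np

lines = ['one two three', 'one', 'one two', 'one two three four']
lengths = [len(s.split()) for s in lines]              # stand-in for token counts
order = np.argsort(lengths)                            # shortest sentences first
num_batches = int(np.ceil(len(order) / 2))             # batch_size = 2
batches = np.array_split(order, num_batches)

indices, results = [], []
for batch_idxs in batches:
    indices.extend(batch_idxs)
    results += [lines[i].upper() for i in batch_idxs]  # stand-in for translation

# np.argsort(indices) inverts the length-sort permutation, so the output
# comes back in the order the sentences were entered
for i in np.argsort(indices):
    print(results[i])

Sorting before batching cuts wasted computation on padding; the second argsort is the inverse permutation, which is what lets the script process batches out of order yet still print in order.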