label_smoothed_cross_entropy.py
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import math

from fairseq import utils

from . import FairseqCriterion, register_criterion


@register_criterion('label_smoothed_cross_entropy')
class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):

    def __init__(self, args, src_dict, dst_dict):
        super().__init__(args, src_dict, dst_dict)
        self.eps = args.label_smoothing

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
                            help='epsilon for label smoothing, 0 means no label smoothing')

    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.

        Returns a tuple with three elements:
        1) the loss, as a Variable
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        net_output = model(**sample['net_input'])
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1, 1)
        non_pad_mask = target.ne(self.padding_idx)
        # Negative log-likelihood of the gold tokens, with padding positions masked out.
        nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
        # Uniform smoothing term: the sum of -log p over the full vocabulary.
        smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
        if reduce:
            nll_loss = nll_loss.sum()
            smooth_loss = smooth_loss.sum()
        eps_i = self.eps / lprobs.size(-1)
        loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss

        sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'nll_loss': utils.item(nll_loss.data) if reduce else nll_loss.data,
            'ntokens': sample['ntokens'],
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        return {
            # Normalize the summed losses and convert from nats to bits for logging.
            'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2),
            'nll_loss': sum(log.get('nll_loss', 0) for log in logging_outputs) / ntokens / math.log(2),
            'sample_size': sample_size,
        }
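For readers who want the smoothing math in isolation, here is a minimal sketch that reproduces the same loss outside of fairseq with plain PyTorch. The function name, tensor shapes, and example values are illustrative assumptions, not part of the original file; only the padding index of 1 mirrors fairseq's default dictionary.

import torch

def label_smoothed_nll_loss(lprobs, target, eps, padding_idx=1):
    # lprobs: (num_tokens, vocab) log-probabilities; target: (num_tokens,) gold indices.
    target = target.view(-1, 1)
    non_pad_mask = target.ne(padding_idx)
    # Negative log-likelihood of the gold token at each non-padding position.
    nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask].sum()
    # Sum of -log p over the whole vocabulary, acting as a uniform smoothing target.
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask].sum()
    eps_i = eps / lprobs.size(-1)
    return (1. - eps) * nll_loss + eps_i * smooth_loss

# Example usage with random data: vocabulary of 8, smoothing epsilon 0.1,
# and index 1 treated as padding.
lprobs = torch.log_softmax(torch.randn(5, 8), dim=-1)
target = torch.tensor([2, 3, 1, 4, 0])
loss = label_smoothed_nll_loss(lprobs, target, eps=0.1)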