learned_positional_embedding.py

# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

from torch.autograd import Variable
import torch.nn as nn

from fairseq import utils


class LearnedPositionalEmbedding(nn.Embedding):
    """This module learns positional embeddings up to a fixed maximum size.

    Padding symbols are ignored, but it is necessary to specify whether padding
    is added on the left side (left_pad=True) or right side (left_pad=False).
    """

    def __init__(self, num_embeddings, embedding_dim, padding_idx, left_pad):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.left_pad = left_pad

    def forward(self, input, incremental_state=None):
        """Input is expected to be of size [bsz x seqlen]."""
        if incremental_state is not None:
            # positions is the same for every token when decoding a single step
            positions = input.data.new(1, 1).fill_(self.padding_idx + input.size(1))
        else:
            positions = utils.make_positions(input.data, self.padding_idx, self.left_pad)
        return super().forward(Variable(positions))

    def max_positions(self):
        """Maximum number of supported positions."""
        return self.num_embeddings - self.padding_idx - 1
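Below is a minimal usage sketch, not part of the original file. It assumes an older fairseq release in which utils.make_positions accepts (tensor, padding_idx, left_pad), matching the call above; the sizes and token ids are invented for illustration.

import torch

# Hypothetical sizes, chosen for illustration only. Positions begin at
# padding_idx + 1, so the table needs max_positions + padding_idx + 1 rows.
max_positions, embed_dim, padding_idx = 1024, 512, 1
pos_emb = LearnedPositionalEmbedding(
    num_embeddings=max_positions + padding_idx + 1,
    embedding_dim=embed_dim,
    padding_idx=padding_idx,
    left_pad=False,  # padding appended on the right
)

# Toy batch of token ids, [bsz x seqlen]; padding_idx marks pad slots.
tokens = torch.tensor([[4, 5, 6, padding_idx],
                       [7, 8, padding_idx, padding_idx]])

out = pos_emb(tokens)            # [2, 4, 512]; pad slots map to the zero embedding
assert pos_emb.max_positions() == 1024

# With an incremental_state dict (single-step decoding), only the position of
# the current step is embedded, giving a [1, 1, embed_dim] tensor that the
# decoder broadcasts across the batch.
step = pos_emb(tokens, incremental_state={})  # [1, 1, 512]

Sizing the table as max_positions + padding_idx + 1 mirrors max_positions(), which simply inverts that relationship to report how many real positions the module supports.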