from transformers import CLIPVisionModel
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass


@dataclass
class VisionEncoderConfig:
    n_embd: int = 2048
    vision_tower_name: str = 'openai/clip-vit-large-patch14-336'
    grid_size: int = -1  # -1: no grid pooling, 0: take cls token, 1: global avg pooling, 2, 3, 4, ...: grid pooling


class VisionEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        self.args = args
        # CLIP vision tower plus a linear projection into the language model's embedding width.
        self.vit = CLIPVisionModel.from_pretrained(args.vision_tower_name)
        self.proj = nn.Linear(self.vit.config.hidden_size, args.n_embd, bias=False)

    def encode_images(self, images):
        # images: [B, N, C, H, W] -- a batch of B samples with N images each.
        B, N, C, H, W = images.shape
        images = images.view(B * N, C, H, W)
        image_features = self.vit(images).last_hidden_state
        L, D = image_features.shape[1], image_features.shape[2]
        # rearrange [B*N, L, D] -> [B, N, L, D] and keep only the first image per sample
        image_features = image_features.view(B, N, L, D)[:, 0, :, :]
        image_features = self.grid_pooling(image_features)
        return self.proj(image_features)

    def grid_pooling(self, image_features):
        if self.args.grid_size == -1:  # no grid pooling
            return image_features
        if self.args.grid_size == 0:  # take cls token
            return image_features[:, 0:1, :]
        if self.args.grid_size == 1:  # global avg pooling
            return image_features.mean(dim=1, keepdim=True)
        cls_features = image_features[:, 0:1, :]
        image_features = image_features[:, 1:, :]  # drop cls token
        B, L, D = image_features.shape
        H_or_W = int(L ** 0.5)  # patch tokens form an H_or_W x H_or_W grid
        image_features = image_features.view(B, H_or_W, H_or_W, D)
        grid_stride = H_or_W // self.args.grid_size
        # average-pool the patch grid down to grid_size x grid_size tokens
        image_features = F.avg_pool2d(image_features.permute(0, 3, 1, 2),
                                      padding=0,
                                      kernel_size=grid_stride,
                                      stride=grid_stride)
        image_features = image_features.permute(0, 2, 3, 1).view(B, -1, D)
        return torch.cat((cls_features, image_features), dim=1)
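
A minimal usage sketch of the encoder above. The random pixel tensor and the grid_size value are illustrative only (real inputs would come from the CLIP image processor); the shapes assume the default openai/clip-vit-large-patch14-336 tower, whose 336x336 input yields a 24x24 patch grid plus a cls token.

# Hypothetical usage sketch: instantiate the encoder and project one batch of images.
config = VisionEncoderConfig(n_embd=2048, grid_size=2)
encoder = VisionEncoder(config)

# 2 samples, 1 image each, 3x336x336 pixels (illustrative random data, not preprocessed images).
images = torch.randn(2, 1, 3, 336, 336)
with torch.no_grad():
    features = encoder.encode_images(images)

# With grid_size=2 the 24x24 patch grid is pooled to 2x2 tokens, plus the cls token:
# features.shape == (2, 5, 2048)
print(features.shape)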