zhouzaida committed
Commit f5378d2 · 1 Parent(s): 0e5541e
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,41 @@
  ---
  license: mit
  ---
+
+
+ <div align="center">
+   <img width="30%" src="figures/logo.png">
+ </div>
+
+
+ ## Introduction
+
+ **MoonViT** is a native-resolution vision encoder, initialized from and continually pre-trained on **SigLIP-SO-400M**.
+ To facilitate standalone use, we have separated MoonViT's implementation and weights from [moonshotai/Kimi-VL-A3B-Instruct](https://huggingface.co/moonshotai/Kimi-VL-A3B-Instruct).
+
+ If you are interested in the training process of MoonViT, please refer to the paper [Kimi-VL Technical Report](https://huggingface.co/papers/2504.07491).
+
+ ## Example usage
+
+ ```python
+ from PIL import Image
+ from transformers import AutoModel, AutoImageProcessor
+
+ model_path = "moonshotai/MoonViT-SO-400M"
+ model = AutoModel.from_pretrained(
+     model_path,
+     torch_dtype="auto",
+     device_map="auto",
+     trust_remote_code=True,
+ )
+ processor = AutoImageProcessor.from_pretrained(model_path, trust_remote_code=True)
+
+ image_path = "./figures/demo.png"
+ image = Image.open(image_path)
+
+ images_processed = processor(image, return_tensors="pt").to(dtype=model.dtype, device=model.device)
+ image_features: list = model(images_processed.pixel_values, images_processed.image_grid_hws)
+
+ print(f"dtype: {image_features[0].dtype}, shape: {image_features[0].shape}")
+ # dtype: torch.bfloat16, shape: torch.Size([1092, 4, 1152])
+ ```
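The model returns a Python list with one tensor per input image, each shaped (merged_positions, merge_h * merge_w, hidden_size); for the demo image that is (1092, 4, 1152). A minimal sketch of how such an output could be flattened for downstream use, continuing from the README example above; the reshaping choices here are illustrative and are not the layout used by Kimi-VL's own projector:

```python
import torch

# One tensor per image: (merged_positions, merge_h * merge_w, hidden_size) = (1092, 4, 1152)
feats = image_features[0]

# Option A: treat every 14x14 patch as its own token -> (4368, 1152)
per_patch_tokens = feats.reshape(-1, feats.shape[-1])

# Option B: average each 2x2 group into one token per merged position -> (1092, 1152)
per_group_tokens = feats.mean(dim=1)

print(per_patch_tokens.shape, per_group_tokens.shape)
```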
config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "auto_map": {
+     "AutoConfig": "configuration_moonvit.MoonViTConfig",
+     "AutoModel": "modeling_moonvit.MoonVitPretrainedModel"
+   },
+   "model_type": "moonvit",
+   "patch_size": 14,
+   "num_attention_heads": 16,
+   "num_hidden_layers": 27,
+   "hidden_size": 1152,
+   "intermediate_size": 4304,
+   "init_pos_emb_height": 64,
+   "init_pos_emb_width": 64,
+   "merge_kernel_size": [
+     2,
+     2
+   ],
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.50.3"
+ }
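The `auto_map` block above is what lets `AutoConfig`/`AutoModel` resolve the custom classes shipped in this repository. A minimal sketch of inspecting the loaded config (assumes Hub access and `trust_remote_code`, using the model path from the README):

```python
from transformers import AutoConfig

# auto_map routes this to configuration_moonvit.MoonViTConfig
config = AutoConfig.from_pretrained("moonshotai/MoonViT-SO-400M", trust_remote_code=True)

print(config.model_type)         # moonvit
print(config.hidden_size)        # 1152
print(config.merge_kernel_size)  # [2, 2]
```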
configuration_moonvit.py ADDED
@@ -0,0 +1,30 @@
+ from transformers.configuration_utils import PretrainedConfig
+
+
+ class MoonViTConfig(PretrainedConfig):
+     model_type = "moonvit"
+
+     def __init__(
+         self,
+         patch_size: int = 14,
+         init_pos_emb_height: int = 64,
+         init_pos_emb_width: int = 64,
+         num_attention_heads: int = 16,
+         num_hidden_layers: int = 27,
+         hidden_size: int = 1152,
+         intermediate_size: int = 4304,
+         merge_kernel_size: tuple[int, int] = (2, 2),
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+         self.patch_size = patch_size
+         # Positional embedding config
+         self.init_pos_emb_height = init_pos_emb_height
+         self.init_pos_emb_width = init_pos_emb_width
+         # Transformer config
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         # Patch merger config
+         self.merge_kernel_size = merge_kernel_size
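Since the defaults above mirror config.json, the config can also be built locally without the Hub. One derived value worth checking is the per-head dimension, which the 2D rotary embedding in modeling_moonvit.py requires to be divisible by 4. A small sketch (our check, assuming configuration_moonvit.py is importable from the working directory):

```python
from configuration_moonvit import MoonViTConfig  # local import; adjust the path as needed

config = MoonViTConfig()  # defaults match config.json
head_dim = config.hidden_size // config.num_attention_heads
print(head_dim, head_dim % 4 == 0)  # 72 True -> satisfies Rope2DPosEmb's "dim must be divisible by 4"
```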
figures/demo.png ADDED
Git LFS Details
  • SHA256: 95de8765da89c41a2421f1c1fa3986e4d3c83793d92c8ade460a142b329d04c1
  • Pointer size: 131 Bytes
  • Size of remote file: 525 kB
figures/logo.png ADDED
Git LFS Details
  • SHA256: 7870b48105beb49cdb29bb3090abb7bbca688bef862507904c23d9c472df221c
  • Pointer size: 130 Bytes
  • Size of remote file: 13.1 kB
image_processing_moonvit.py ADDED
@@ -0,0 +1,126 @@
+ """Image processor class for KimiVL."""
+
+ import math
+ import numpy as np
+ from PIL import Image
+ from typing import Optional, Union
+
+ import torch
+ from torchvision.transforms import functional as TF
+ from transformers.image_utils import ImageInput, make_list_of_images, valid_images
+ from transformers.image_processing_utils import BaseImageProcessor, BatchFeature
+ from transformers.utils import TensorType
+
+
+ OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
+ OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
+
+
+ class MoonViTImageProcessor(BaseImageProcessor):
+     model_type = "moonvit"
+
+     def __init__(
+         self,
+         patch_size: int = 14,
+         pad_input: bool = False,
+         image_mean: tuple[float, float, float] = OPENAI_DATASET_MEAN,
+         image_std: tuple[float, float, float] = OPENAI_DATASET_STD,
+         in_token_limit: int = 4096,
+         merge_kernel_size: list[int, int] = [2, 2],
+         **kwargs,
+     ):
+         super().__init__(**kwargs)
+         self.in_token_limit = in_token_limit
+         self.patch_size = patch_size
+         self.pad_input = pad_input
+         self.image_mean = image_mean
+         self.image_std = image_std
+         self.merge_kernel_size = merge_kernel_size
+
+     def rescale(
+         self, image: Image.Image, merge_kernel_size: list[int, int] = [2, 2]
+     ) -> Image.Image:
+         w, h = image.size
+         patch_size = self.patch_size
+
+         if (w // patch_size) * (h // patch_size) > self.in_token_limit:
+             scale = math.sqrt(self.in_token_limit / ((w // patch_size) * (h // patch_size)))
+             new_w, new_h = int(w * scale), int(h * scale)
+             image = image.resize((new_w, new_h), Image.Resampling.BICUBIC)
+         if self.pad_input:
+             new_w, new_h = image.size
+             pad_size_h = merge_kernel_size[0] * patch_size
+             pad_size_w = merge_kernel_size[1] * patch_size
+
+             pad_h = (pad_size_h - new_h % pad_size_h) % pad_size_h
+             pad_w = (pad_size_w - new_w % pad_size_w) % pad_size_w
+
+             image = TF.pad(image, (0, 0, pad_w, pad_h))
+         else:
+             new_w, new_h = image.size
+             new_w = new_w - new_w % patch_size
+             new_h = new_h - new_h % patch_size
+             image = TF.center_crop(image, (new_h, new_w))
+
+         w, h = image.size
+         if w // patch_size >= 512 or h // patch_size >= 512:
+             raise ValueError("Exceed pos emb")
+
+         return image
+
+     def to_tensor(self, image: Image.Image) -> torch.Tensor:
+         return TF.to_tensor(image.convert("RGB"))
+
+     def normalize(self, image: torch.Tensor) -> torch.Tensor:
+         return TF.normalize(image, self.image_mean, self.image_std)
+
+     def patchify(self, image: torch.Tensor) -> tuple[torch.Tensor, list[int, int]]:
+         patch_size = self.patch_size
+         C, H, W = image.shape
+         patches = image.reshape(C, H // patch_size, patch_size, W // patch_size, patch_size)
+         patches = patches.permute(1, 3, 0, 2, 4)
+         patches = patches.contiguous().view(-1, C, patch_size, patch_size)
+         grid_hw = (H // patch_size, W // patch_size)
+         return patches, grid_hw
+
+     def _preprocess(self, image: ImageInput) -> tuple[torch.Tensor, list[int, int]]:
+         """
+         Preprocess image and patchify it.
+
+         Args:
+             image (`ImageInput`):
+                 Image to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
+
+         Returns:
+             patches: torch.Tensor
+             grid_hw: list[int, int]
+         """
+         image = self.rescale(image, self.merge_kernel_size)
+         image = self.to_tensor(image)
+         image = self.normalize(image)
+         patches, grid_hw = self.patchify(image)
+         return patches, grid_hw
+
+     def preprocess(
+         self,
+         images: ImageInput,
+         return_tensors: Optional[Union[str, TensorType]] = None,
+     ) -> BatchFeature:
+         images = make_list_of_images(images)
+
+         if not valid_images(images):
+             raise ValueError(
+                 "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
+                 "torch.Tensor, tf.Tensor or jax.ndarray."
+             )
+
+         pixel_values, image_grid_hws = [], []
+         for image in images:
+             patches, image_grid_hw = self._preprocess(image)
+             pixel_values.append(patches)
+             image_grid_hws.append(image_grid_hw)
+         pixel_values = torch.concat(pixel_values, dim=0)
+         image_grid_hws = np.array(image_grid_hws)
+         data = {"pixel_values": pixel_values, "image_grid_hws": image_grid_hws}
+
+         return BatchFeature(data=data, tensor_type=return_tensors)
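To make the shape bookkeeping above concrete: `rescale` first caps the patch count at `in_token_limit`, then pads (or crops) the image so both sides are multiples of the merge kernel times the patch size, and `patchify` returns one row per 14x14 patch. A worked example with an illustrative input size (the 1024x768 numbers are ours, not from the repository) and `pad_input=True` as in preprocessor_config.json:

```python
# Illustrative token bookkeeping for a hypothetical 1024x768 image with pad_input=True
patch_size, merge_h, merge_w = 14, 2, 2

w, h = 1024, 768
assert (w // patch_size) * (h // patch_size) <= 4096  # 73 * 54 = 3942, so no down-scaling

pad_w = (merge_w * patch_size - w % (merge_w * patch_size)) % (merge_w * patch_size)  # 12
pad_h = (merge_h * patch_size - h % (merge_h * patch_size)) % (merge_h * patch_size)  # 16
grid_w, grid_h = (w + pad_w) // patch_size, (h + pad_h) // patch_size  # 74, 56

print(grid_h * grid_w)                            # 4144 rows in pixel_values, each (3, 14, 14)
print((grid_h // merge_h) * (grid_w // merge_w))  # 1036 merged tokens after patch_merger
```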
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a375216ea19430d70c8f68d4d205fae011f1b2ad9a124238bcd7006324e1fdde
+ size 833765656
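A rough sanity check on this size, assuming essentially all bytes are bfloat16 weight data (2 bytes per parameter, per the `torch_dtype` in config.json):

```python
print(833_765_656 / 2 / 1e6)  # ~416.9M parameters, in line with the "SO-400M" naming
```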
modeling_moonvit.py ADDED
@@ -0,0 +1,560 @@
+ import math
+ from copy import deepcopy
+ from typing import Union, Tuple, Sequence, Optional, List
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers.activations import PytorchGELUTanh
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.utils import is_flash_attn_2_available
+
+ from .configuration_moonvit import MoonViTConfig
+
+ if is_flash_attn_2_available():
+     from flash_attn import flash_attn_varlen_func
+ else:
+     flash_attn_varlen_func = None
+
+
+ def multihead_attention(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     q_cu_seqlens: Optional[torch.Tensor] = None,
+     k_cu_seqlens: Optional[torch.Tensor] = None,
+ ):
+     """Multi-head attention using flash attention 2.
+
+     Args:
+         q, k, v: tensor of shape (batch_size, seqlen, num_heads, head_dim),
+             or (tot_seqlens, num_heads, head_dim) if packing.
+         q_cu_seqlens (torch.Tensor): cumulative sequence lengths of q.
+             The first element should be 0 and the last element should be q.shape[0].
+         k_cu_seqlens (torch.Tensor): cumulative sequence lengths of k.
+             The first element should be 0 and the last element should be k.shape[0].
+
+     Returns:
+         output: shape (batch_size, seqlen, dim) or (tot_seqlens, dim) if packing,
+             where dim = num_heads * head_dim
+     """
+     # Unified format legal check
+     assert q.dim() == k.dim() == v.dim() == 3, "q, k, v must have 3 dims"
+     assert q_cu_seqlens[-1] == q.shape[0], "q_cu_seqlens must sum to q.shape[0]"
+     assert (
+         k_cu_seqlens[-1] == k.shape[0] == v.shape[0]
+     ), "k_cu_seqlens must sum to k.shape[0]"
+     assert q.dtype in [
+         torch.bfloat16,
+         torch.float16,
+     ], f"unsupported dtype {q.dtype} for multihead attn"
+
+     max_seqlen_q = (q_cu_seqlens[1:] - q_cu_seqlens[:-1]).max().item()
+     max_seqlen_k = (k_cu_seqlens[1:] - k_cu_seqlens[:-1]).max().item()
+     attn_out = flash_attn_varlen_func(
+         q,
+         k,
+         v,
+         q_cu_seqlens,
+         k_cu_seqlens,
+         max_seqlen_q,
+         max_seqlen_k,
+         causal=False,
+     )
+     attn_out = attn_out.flatten(start_dim=-2)
+
+     return attn_out
+
+
+ def sdpa_attention(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     q_cu_seqlens: Optional[torch.Tensor] = None,
+     k_cu_seqlens: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+     """SDPA attention.
+
+     Args:
+         q, k, v: tensor of shape (batch_size, seqlen, num_heads, head_dim),
+             or (tot_seqlens, num_heads, head_dim) if packing.
+     """
+     seq_length = q.shape[0]
+     attention_mask = torch.zeros(
+         [1, seq_length, seq_length], device=q.device, dtype=torch.bool
+     )
+     for i in range(1, len(q_cu_seqlens)):
+         attention_mask[
+             ...,
+             q_cu_seqlens[i - 1] : q_cu_seqlens[i],
+             q_cu_seqlens[i - 1] : q_cu_seqlens[i],
+         ] = True
+     q = q.transpose(0, 1)
+     k = k.transpose(0, 1)
+     v = v.transpose(0, 1)
+     attn_output = F.scaled_dot_product_attention(q, k, v, attention_mask, dropout_p=0.0)
+     attn_output = attn_output.transpose(0, 1)
+     attn_output = attn_output.reshape(seq_length, -1)
+     return attn_output
+
+
+ def eager_attention(
+     q: torch.Tensor,
+     k: torch.Tensor,
+     v: torch.Tensor,
+     q_cu_seqlens: Optional[torch.Tensor] = None,
+     k_cu_seqlens: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+     seq_length = q.shape[0]
+     attention_mask = torch.zeros(
+         [1, seq_length, seq_length], device=q.device, dtype=torch.bool
+     )
+     for i in range(1, len(q_cu_seqlens)):
+         attention_mask[
+             ...,
+             q_cu_seqlens[i - 1] : q_cu_seqlens[i],
+             q_cu_seqlens[i - 1] : q_cu_seqlens[i],
+         ] = True
+     q = q.transpose(0, 1)
+     k = k.transpose(0, 1)
+     v = v.transpose(0, 1)
+
+     attn_weight = q @ k.transpose(-2, -1) / math.sqrt(q.shape[-1])
+     attn_weight += attention_mask
+     attn_weight = torch.softmax(attn_weight, dim=-1, dtype=torch.float32).to(q.dtype)
+
+     attn_output = attn_weight @ v
+     attn_output = attn_output.transpose(0, 1)
+     attn_output = attn_output.reshape(seq_length, -1)
+     return attn_output
+
+
+ VL_VISION_ATTENTION_FUNCTIONS = {
+     "flash_attention_2": multihead_attention,
+     "sdpa": sdpa_attention,
+     "eager": eager_attention,
+ }
+
+
+ def _apply_rope_input_validation(x, freqs_cis):
+     assert x.ndim == freqs_cis.ndim + 1, (x.shape, freqs_cis.shape)
+     assert x.shape[:-2] == freqs_cis.shape[:-1], (x.shape, freqs_cis.shape)
+     assert x.shape[-1] == 2 * freqs_cis.shape[-1], (x.shape, freqs_cis.shape)
+     assert freqs_cis.dtype == torch.complex64, freqs_cis.dtype
+
+
+ def apply_rope(
+     xq: torch.Tensor, xk: torch.Tensor, freqs_cis: torch.Tensor
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+     """
+     Args: (The leading dimensions of all inputs should be the same)
+         xq: query, tensor of shape (..., num_heads, head_dim)
+         xk: key, tensor of shape (..., num_heads, head_dim)
+         freqs_cis: tensor of shape (..., head_dim/2), dtype=torch.complex64. It contains the precomputed cis(freqs) for each position in the 2D grid.
+     Returns:
+         xq_out, xk_out: tensors of shape (..., num_heads, head_dim)
+     """
+     _apply_rope_input_validation(xq, freqs_cis)
+     _apply_rope_input_validation(xk, freqs_cis)
+
+     freqs_cis = freqs_cis.unsqueeze(-2)  # ..., 1, head_dim/2
+     # ..., num_heads, head_dim/2
+     xq_ = torch.view_as_complex(xq.float().view(*xq.shape[:-1], -1, 2))
+     xk_ = torch.view_as_complex(xk.float().view(*xq.shape[:-1], -1, 2))
+     xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(-2)  # ..., num_heads, head_dim
+     xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(-2)  # ..., num_heads, head_dim
+     return xq_out.type_as(xq), xk_out.type_as(xk)
+
+
+ class Learnable2DInterpPosEmb(nn.Module):
+     def __init__(
+         self, height: int, width: int, dim: int, interpolation_mode: str = "bicubic"
+     ) -> None:
+         super().__init__()
+         self.height = height
+         self.width = width
+         self.interpolation_mode = interpolation_mode
+         self.weight = nn.Parameter(torch.empty(height, width, dim))
+         self.reset_parameters()
+
+     def reset_parameters(self):
+         nn.init.normal_(self.weight)
+
+     def forward(self, x: torch.Tensor, grid_hws: torch.Tensor) -> torch.Tensor:
+         pos_embs = []
+         for shape in grid_hws.tolist():
+             if shape == self.weight.shape[:-1]:
+                 pos_embs.append(self.weight.flatten(end_dim=1))
+             else:
+                 pos_embs.append(
+                     F.interpolate(
+                         self.weight.permute((2, 0, 1)).unsqueeze(0),
+                         size=shape,
+                         mode=self.interpolation_mode,
+                     )
+                     .squeeze(0)
+                     .permute((1, 2, 0))
+                     .flatten(end_dim=1)
+                 )
+         out = x + torch.cat(pos_embs)
+         return out
+
+
+ class MoonVisionPatchEmbed(nn.Module):
+
+     def __init__(
+         self,
+         out_dim: int,
+         in_dim: int = 3,
+         patch_size: Union[int, Tuple[int, int]] = (14, 14),
+         pos_emb_height: int = 14,
+         pos_emb_width: int = 14,
+     ):
+         super().__init__()
+         assert isinstance(
+             patch_size, (int, Sequence)
+         ), f"Invalid patch_size type: {type(patch_size)}"
+         if isinstance(patch_size, int):
+             patch_size = (patch_size, patch_size)
+         assert (
+             len(patch_size) == 2
+         ), f"Expected patch_size to be a tuple of 2, got {patch_size}"
+         self.patch_size = patch_size
+
+         self.proj = nn.Conv2d(
+             in_dim, out_dim, kernel_size=patch_size, stride=patch_size
+         )
+
+         self.pos_emb = Learnable2DInterpPosEmb(
+             height=pos_emb_height, width=pos_emb_width, dim=out_dim
+         )
+
+     def forward(self, x: torch.Tensor, grid_hws: torch.Tensor) -> torch.Tensor:
+         """
+         Args:
+             x (L, Channels): input tensor
+             grid_hws (N, 2): grid height and width
+
+         Returns:
+             (L, Cout) tensor
+         """
+         x = self.proj(x).view(x.size(0), -1)
+         # apply positional embedding
+         x = self.pos_emb(x, grid_hws)
+         return x
+
+
+ class Rope2DPosEmb(nn.Module):
+     """2D rotary position embedding with multi-resolution support.
+
+     This class is intended to be used in the following way:
+     1. Before training, create an instance of Rope2DPosEmb. This instance will hold the precomputed cis.
+     2. Before each forward pass, call `get_freqs_cis_by_*` to get the `freqs_cis` tensor for this iteration.
+     3. During the forward pass, pass the `freqs_cis` tensor to each attention layer, and call `apply` just before each attention operation.
+     The rope is shared across all attention layers and all heads.
+
+     Refs:
+         - RoFormer: https://arxiv.org/abs/2104.09864
+         - VisionLLaMA: https://arxiv.org/abs/2403.00522
+         - https://github.com/Meituan-AutoML/VisionLLaMA/blob/main/dit/models.py
+
+     Args:
+         dim (int): usually the multi-head attention dimension, should be divisible by 4 (TODO: relax this constraint if needed)
+         max_height (int): the maximum height of the 2D grid
+         max_width (int): the maximum width of the 2D grid
+         theta_base (float): the base of the theta
+         device (str): the device to store the precomputed cis
+     """
+
+     def __init__(self, dim: int, max_height: int, max_width: int, theta_base=10000):
+         super().__init__()
+         self.dim = dim
+         assert self.dim % 4 == 0, "dim must be divisible by 4"
+         self.max_height = max_height
+         self.max_width = max_width
+         self.theta_base = theta_base
+
+         self.freqs_cis = None
+
+     def extra_repr(self):
+         return f"dim={self.dim}, max_height={self.max_height}, max_width={self.max_width}, theta_base={self.theta_base}"
+
+     def _precompute_freqs_cis(self, device: torch.device) -> torch.Tensor:
+         """Calculate the cis(freqs) for each position in the 2D grid.
+
+         Return: complex tensor of shape (max_height, max_width, dim//2) and value:
+             height axis: ret[h, w, 2*i] = cis(h * theta_base**(-4*i/dim))
+             weight axis: ret[h, w, 2*i+1] = cis(w * theta_base**(-4*i/dim)) with (i in [0, dim//4))
+             note: `cis` is a mathematical notation defined by cis x = cos x + i sin x,
+         """
+         N = self.max_height * self.max_width
+         flat_pos = torch.arange(0, N).float().to(device)
+         x_pos = flat_pos % self.max_width
+         y_pos = flat_pos // self.max_width
+         dim_range = (
+             torch.arange(0, self.dim, 4)[: (self.dim // 4)].float().to(device)
+         )  # C/4
+         freqs = 1.0 / (self.theta_base ** (dim_range / self.dim))
+         x_freqs = torch.outer(x_pos, freqs).float()  # N, C/4
+         y_freqs = torch.outer(y_pos, freqs).float()  # N, C/4
+         x_cis = torch.polar(torch.ones_like(x_freqs), x_freqs)  # N, C/4
+         y_cis = torch.polar(torch.ones_like(y_freqs), y_freqs)  # N, C/4
+         # N, C/4, 2
+         freqs_cis = torch.cat(
+             [x_cis.unsqueeze(dim=-1), y_cis.unsqueeze(dim=-1)], dim=-1
+         )
+         # max_height, max_width, C/2
+         freqs_cis = freqs_cis.reshape(self.max_height, self.max_width, -1)
+         return freqs_cis
+
+     def get_freqs_cis(self, grid_hws: torch.Tensor) -> torch.Tensor:
+         """
+         Args:
+             grid_hws (torch.Tensor): grid height and width
+
+         Returns:
+             freqs_cis: tensor of shape (sum(t * height * width), dim//2)
+         """
+         if self.freqs_cis is None:
+             self.freqs_cis = self._precompute_freqs_cis(grid_hws.device)
+
+         shapes = grid_hws.tolist()
+         assert all(
+             1 <= h <= self.max_height and 1 <= w <= self.max_width for h, w in shapes
+         ), (
+             shapes,
+             self.max_height,
+             self.max_width,
+         )
+         freqs_cis = torch.cat(
+             [self.freqs_cis[:h, :w].reshape(-1, self.dim // 2) for h, w in shapes],
+             dim=0,
+         )
+         return freqs_cis
+
+
+ class MLP2(nn.Module):
+     """
+     Args:
+         dims: [in_dim, hidden_dim, out_dim]
+         bias: whether to use bias in linear layer.
+     """
+
+     def __init__(self, dims: list[int], activation, bias=True):
+         super().__init__()
+         assert len(dims) == 3
+         self.fc0 = nn.Linear(dims[0], dims[1], bias=bias)
+         self.fc1 = nn.Linear(dims[1], dims[2], bias=bias)
+         self.activation = activation
+         for m in [self.fc0, self.fc1]:
+             nn.init.trunc_normal_(m.weight, std=math.sqrt(2 / m.in_features))
+             if m.bias is not None:
+                 nn.init.zeros_(m.bias)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         x = self.fc0(x)
+         x = self.activation(x)
+         return self.fc1(x)
+
+
+ class MoonVitEncoderLayer(nn.Module):
+
+     def __init__(
+         self,
+         num_heads: int,
+         hidden_dim: int,
+         mlp_dim: int,
+         *,
+         attn_implementation: str = "eager",
+         activation=F.gelu,
+         attn_bias: bool = False,
+     ):
+         super().__init__()
+         self.num_heads = num_heads
+         self.hidden_dim = hidden_dim
+         self.hidden_size_per_attention_head = self.hidden_dim // self.num_heads
+         self.attn_implementation = attn_implementation
+
+         self.norm0 = nn.LayerNorm(hidden_dim)
+         self.norm1 = nn.LayerNorm(hidden_dim)
+         self.mlp = MLP2([hidden_dim, mlp_dim, hidden_dim], activation)
+         self.wqkv = nn.Linear(hidden_dim, hidden_dim * 3, bias=attn_bias)
+         self.wo = nn.Linear(hidden_dim, hidden_dim, bias=attn_bias)
+
+     def attention_qkvpacked(
+         self,
+         x: torch.Tensor,
+         cu_seqlens: torch.Tensor,
+         rope_freqs_cis: Optional[torch.Tensor] = None,
+     ):
+         """
+         Args:
+             x (torch.Tensor): (batch_size, seqlen, hidden_dim)
+             cu_seqlens (torch.Tensor):
+         """
+         xqkv = self.wqkv(x)
+
+         qkv_shape = xqkv.size()[:-1] + (
+             3,
+             self.num_heads,
+             self.hidden_size_per_attention_head,
+         )
+         # xqkv: (batch_size, seqlen, 3, nheads, headdim)
+         xqkv = xqkv.view(*qkv_shape)
+         xq, xk, xv = torch.unbind(xqkv, dim=-3)
+
+         xq, xk = apply_rope(xq, xk, rope_freqs_cis)
+
+         attn_func = VL_VISION_ATTENTION_FUNCTIONS[self.attn_implementation]
+         attn_out = attn_func(
+             xq, xk, xv, q_cu_seqlens=cu_seqlens, k_cu_seqlens=cu_seqlens
+         )
+
+         attn_out = self.wo(attn_out)
+         return attn_out
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         cu_seqlens: torch.Tensor,
+         rope_freqs_cis: Union[torch.Tensor, None] = None,
+     ) -> torch.Tensor:
+         """
+         Args:
+             hidden_states: non-packed (B, N, D) or packed (L, D). if non-packed, seqlens should be None, if packed, seqlens should be set
+
+         Returns:
+             output: same shape of input, non-packed (B, N, D) for non-packed input, (L, D) for packed input
+         """
+         residual = hidden_states
+         hidden_states = self.norm0(hidden_states)
+         attn_out = self.attention_qkvpacked(
+             hidden_states, cu_seqlens, rope_freqs_cis=rope_freqs_cis
+         )
+         hidden_states = residual + attn_out
+
+         residual = hidden_states
+         hidden_states = self.mlp(self.norm1(hidden_states))
+         hidden_states = residual + hidden_states
+         return hidden_states
+
+
+ class MoonVitEncoder(nn.Module):
+
+     def __init__(
+         self,
+         hidden_dim: int,
+         num_layers: int,
+         block_cfg: dict,
+     ) -> None:
+         super().__init__()
+
+         self.rope_2d = Rope2DPosEmb(
+             block_cfg["hidden_dim"] // block_cfg["num_heads"], 512, 512
+         )
+         self.blocks = nn.ModuleList(
+             [MoonVitEncoderLayer(**block_cfg) for _ in range(num_layers)]
+         )
+         self.final_layernorm = nn.LayerNorm(hidden_dim)
+
+     def forward(
+         self, hidden_states: torch.Tensor, grid_hws: torch.Tensor
+     ) -> torch.Tensor:
+         rope_freqs_cis = self.rope_2d.get_freqs_cis(grid_hws=grid_hws)
+
+         lengths = torch.cat(
+             (
+                 torch.zeros(1, device=hidden_states.device, dtype=grid_hws.dtype),
+                 grid_hws[:, 0] * grid_hws[:, 1],
+             )
+         )
+         cu_seqlens = lengths.cumsum(dim=0, dtype=torch.int32)
+
+         for _, block in enumerate(self.blocks):
+             hidden_states = block(
+                 hidden_states, cu_seqlens, rope_freqs_cis=rope_freqs_cis
+             )
+
+         hidden_states = self.final_layernorm(hidden_states)
+
+         return hidden_states
+
+
+ def patch_merger(
+     x: torch.Tensor,
+     grid_hws: torch.Tensor,
+     merge_kernel_size: list[int, int] = (2, 2),
+ ) -> List[torch.Tensor]:
+     d_model = x.size(-1)
+
+     outputs = []
+     pre_sum = 0
+     for x_shape in grid_hws.tolist():
+         height, width = x_shape[0], x_shape[1]
+         # Get the current sequence
+         seq = x[pre_sum : pre_sum + height * width]
+         # Reshape along self.merge_kernel_size and concat to the last dimension
+         kernel_height, kernel_width = merge_kernel_size
+         new_height, new_width = height // kernel_height, width // kernel_width
+         reshaped_seq = seq.view(
+             new_height, kernel_height, new_width, kernel_width, d_model
+         )
+         reshaped_seq = reshaped_seq.permute(0, 2, 1, 3, 4).contiguous()
+         padded_seq = reshaped_seq.view(
+             new_height * new_width, kernel_height * kernel_width, -1
+         )
+         outputs.append(padded_seq)
+         pre_sum += height * width
+
+     return outputs
+
+
+ class MoonVitPretrainedModel(PreTrainedModel):
+     config_class = MoonViTConfig
+     model_type = "moonvit"
+     _no_split_modules = ["PackingTransformer"]
+     _supports_flash_attn_2 = True
+     _supports_sdpa = True
+
+     def __init__(self, config: MoonViTConfig, *inputs, **kwargs):
+         super().__init__(config, *inputs, **kwargs)
+         config = deepcopy(config)
+         self.merge_kernel_size = config.merge_kernel_size
+         self.patch_size = config.patch_size
+         self.patch_embed = MoonVisionPatchEmbed(
+             out_dim=config.hidden_size,
+             patch_size=config.patch_size,
+             pos_emb_height=config.init_pos_emb_height,
+             pos_emb_width=config.init_pos_emb_width,
+         )
+
+         self.encoder = MoonVitEncoder(
+             hidden_dim=config.hidden_size,
+             num_layers=config.num_hidden_layers,
+             block_cfg={
+                 "num_heads": config.num_attention_heads,
+                 "hidden_dim": config.hidden_size,
+                 "mlp_dim": config.intermediate_size,
+                 "activation": PytorchGELUTanh(),
+                 "attn_bias": True,
+                 "attn_implementation": config._attn_implementation,
+             },
+         )
+
+     def forward(
+         self, pixel_values: torch.Tensor, grid_hws: torch.Tensor
+     ) -> torch.Tensor:
+         """
+         Args:
+             pixel_values (torch.Tensor): The input pixel values.
+             grid_hws (torch.Tensor): The grid height and width.
+
+         Returns:
+             torch.Tensor: The output tokens.
+         """
+         hidden_states = self.patch_embed(pixel_values, grid_hws)
+         hidden_states = self.encoder(hidden_states, grid_hws)
+         hidden_states = patch_merger(
+             hidden_states, grid_hws, merge_kernel_size=self.merge_kernel_size
+         )
+         return hidden_states
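The encoder packs patches from images of different resolutions into one flat token sequence and keeps attention block-diagonal per image via cumulative sequence lengths, which is exactly what `MoonVitEncoder.forward` computes and what `sdpa_attention` / `flash_attn_varlen_func` consume. A small sketch of that bookkeeping (the grid sizes below are made up for illustration):

```python
import torch

# Mirrors MoonVitEncoder.forward: per-image sequence boundaries from grid areas.
grid_hws = torch.tensor([[4, 6], [3, 3]])  # image 1: 4x6 patches, image 2: 3x3 patches
lengths = torch.cat((torch.zeros(1, dtype=grid_hws.dtype), grid_hws[:, 0] * grid_hws[:, 1]))
cu_seqlens = lengths.cumsum(dim=0, dtype=torch.int32)
print(cu_seqlens)  # tensor([ 0, 24, 33], dtype=torch.int32)

# sdpa_attention turns these boundaries into a block-diagonal boolean mask,
# so patches attend only within their own image.
seq_len = int(cu_seqlens[-1])
mask = torch.zeros(seq_len, seq_len, dtype=torch.bool)
for i in range(1, len(cu_seqlens)):
    mask[cu_seqlens[i - 1]:cu_seqlens[i], cu_seqlens[i - 1]:cu_seqlens[i]] = True
print(mask[0, 23].item(), mask[0, 24].item())  # True False
```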
preprocessor_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "auto_map": {
+     "AutoImageProcessor": "image_processing_moonvit.MoonViTImageProcessor"
+   },
+   "in_token_limit": 4096,
+   "patch_size": 14,
+   "num_pooled_tokens": 1024,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "pad_input": true
+ }
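Note that the values in this JSON take precedence over the Python defaults in image_processing_moonvit.py (the OPENAI_DATASET_MEAN/STD constants and `pad_input=False`), since `from_pretrained` passes them as keyword arguments to `__init__`. A quick check, assuming the same model path as in the README:

```python
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("moonshotai/MoonViT-SO-400M", trust_remote_code=True)
print(processor.image_mean, processor.image_std)  # expected: [0.5, 0.5, 0.5] [0.5, 0.5, 0.5]
print(processor.pad_input)                        # expected: True
```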