[Metax][Optimization] Optimize PaddleOCR-VL vision path on Metax GPU #7619
base: develop
@@ -127,7 +127,11 @@ def forward(
         cos_emb: Optional[paddle.Tensor] = None,  # (cos, sin)
         sin_emb: Optional[paddle.Tensor] = None,  # (cos, sin)
     ):
-        B, seq_length, D = hidden_states.shape
+        if hidden_states.dim() == 3:
+            assert hidden_states.shape[0] == 1, f"SiglipAttention only supports batch=1, got {hidden_states.shape}"

Review comment (❓ question): Suggest replacing the assert with an explicit exception:

    if hidden_states.shape[0] != 1:
        raise ValueError(f"SiglipAttention only supports batch=1, got {hidden_states.shape}")

+            hidden_states = hidden_states[0]
+
+        seq_length, D = hidden_states.shape
         qkv = self.qkv_proj(hidden_states)
         q, k, v = neox_rope_embedding(qkv, cos_emb, sin_emb, self.num_heads, self.head_dim)
         attn_output = self.flash_attn_func(
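For orientation, the change above lets the attention body run on an already-flattened [seq, dim] tensor instead of carrying a [1, seq, dim] tensor through every op. A minimal shape sketch of that handling; the sizes below are illustrative, not taken from the PR:

    import paddle

    # Illustrative sizes only; the packed vision path always runs with batch = 1.
    hidden_states = paddle.randn([1, 1024, 1152])  # legacy [batch, seq, dim] input

    if hidden_states.dim() == 3:
        assert hidden_states.shape[0] == 1
        hidden_states = hidden_states[0]           # drop the batch axis -> [seq, dim]

    seq_length, D = hidden_states.shape            # 1024, 1152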
@@ -255,25 +259,26 @@ def forward(
             flatten_image_grid_thw = self.flatten_list(image_grid_thw)
             flatten_image_grid_thw = np.array(flatten_image_grid_thw)
             assert batch_size == 1
-            start = 0
             assert sum([np.prod(x) for x in flatten_image_grid_thw]) == embeddings.shape[1], (
                 flatten_image_grid_thw,
                 embeddings.shape,
             )
             embeddings = embeddings.squeeze(0)
-            tmp_embeddings = list()
-            for image_grid in image_grid_thw:
-                t, h, w = image_grid
-                end = start + t * h * w
-                image_embeddings = embeddings[int(start) : int(end), :]
-                position_embedding = (
-                    self.interpolate_pos_encoding(image_embeddings, h, w, True).squeeze(0).tile((t, 1))
-                ).astype(image_embeddings.dtype)
-                image_embeddings = image_embeddings + position_embedding
-                tmp_embeddings.append(image_embeddings)
-                start = end
-            embeddings = paddle.concat(tmp_embeddings, axis=0).unsqueeze(0)
+            packed_position_embeddings = []
+            for t, h, w in flatten_image_grid_thw:
+                t, h, w = map(int, (t, h, w))
+                position_embedding = self.fetch_position_embedding_lfu_cache(embeddings, h, w).squeeze(0)
+                if t > 1:
+                    position_embedding = position_embedding.tile((t, 1))
+                if position_embedding.dtype != embeddings.dtype:
+                    position_embedding = position_embedding.astype(embeddings.dtype)
+                packed_position_embeddings.append(position_embedding)
+            if len(packed_position_embeddings) == 1:
+                packed_position_embeddings = packed_position_embeddings[0]
+            else:
+                packed_position_embeddings = paddle.concat(packed_position_embeddings, axis=0)
+            embeddings = (embeddings + packed_position_embeddings).unsqueeze(0)
         else:
             embeddings = embeddings + self.packing_position_embedding(position_ids)
         return embeddings
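The new branch replaces per-image interpolate_pos_encoding calls with fetch_position_embedding_lfu_cache, whose implementation is not part of this hunk. Below is a minimal sketch of the idea — memoizing the interpolated position embedding per (h, w) grid so that repeated image sizes skip re-interpolation. The cache class, its size, and the wiring shown are assumptions for illustration, not the PR's actual code:

    from collections import Counter

    class PositionEmbeddingLFUCache:
        """Hypothetical LFU cache keyed by (h, w) for interpolated position embeddings."""

        def __init__(self, max_entries=32):
            self.max_entries = max_entries
            self._cache = {}        # (h, w) -> position-embedding tensor
            self._freq = Counter()  # (h, w) -> access count

        def get(self, key, compute_fn):
            if key in self._cache:
                self._freq[key] += 1
                return self._cache[key]
            value = compute_fn()
            if len(self._cache) >= self.max_entries:
                # Evict the least-frequently-used grid before inserting the new one.
                evict_key = min(self._freq, key=self._freq.get)
                del self._cache[evict_key]
                del self._freq[evict_key]
            self._cache[key] = value
            self._freq[key] = 1
            return value

    # Hypothetical wiring inside the embeddings module:
    # def fetch_position_embedding_lfu_cache(self, embeddings, h, w):
    #     return self._pos_emb_cache.get(
    #         (h, w),
    #         lambda: self.interpolate_pos_encoding(embeddings, h, w, True),
    #     )

Keyed only on (h, w), such a cache trades a little memory for skipping interpolation whenever the same grid size recurs, which is presumably the motivation for this optimization.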
@@ -307,7 +312,7 @@ def weight_loader(self, param, loaded_weight, loaded_shard_id: Optional[str] = N

     def forward(self, hidden_states: paddle.Tensor) -> paddle.Tensor:
         hidden_states = self.fc1(hidden_states)
-        hidden_states = get_activation_fn(self.config.hidden_act)(hidden_states[0])
+        hidden_states = get_activation_fn(self.config.hidden_act)(hidden_states)
         hidden_states = self.fc2(hidden_states)
         return hidden_states
@@ -321,7 +326,7 @@ def __init__(self, config):
         self.layer_norm2 = paddle.nn.LayerNorm(self.embed_dim, epsilon=config.layer_norm_eps)
         self.mlp = SiglipMLP(config)

-    def forward(
+    def _forward_impl(
         self,
         hidden_states,
         attention_mask,
@@ -331,9 +336,7 @@ def forward(
         cos_emb=None,
         sin_emb=None,
     ):
         residual = hidden_states
-        ############################
         ln1_out = self.layer_norm1(hidden_states)

         x = self.self_attn(
@@ -346,18 +349,45 @@ def forward(
             sin_emb=sin_emb,
         )

-        hs_post_attn = residual + x
-
-        residual = hs_post_attn
+        hidden_states = residual + x
+        residual = hidden_states
         ln2_out = self.layer_norm2(residual)

         mlp_out = self.mlp(ln2_out)
-
-        hidden_states_out = residual + mlp_out
-
-        outputs = (hidden_states_out,)
-
-        return outputs
+        return residual + mlp_out
+
+    def forward(
+        self,
+        hidden_states,
+        attention_mask,
+        output_attentions=False,
+        cu_seqlens=None,
+        max_seqlen=None,
+        cos_emb=None,
+        sin_emb=None,
+    ):
+        if hidden_states.dim() == 3 and hidden_states.shape[0] == 1:
+            hidden_states_out = self._forward_impl(
+                hidden_states=hidden_states[0],
+                attention_mask=attention_mask,
+                output_attentions=output_attentions,
+                cu_seqlens=cu_seqlens,
+                max_seqlen=max_seqlen,
+                cos_emb=cos_emb,
+                sin_emb=sin_emb,
+            )
+            return (hidden_states_out.unsqueeze(0),)
+
+        hidden_states_out = self._forward_impl(
+            hidden_states=hidden_states,
+            attention_mask=attention_mask,
+            output_attentions=output_attentions,
+            cu_seqlens=cu_seqlens,
+            max_seqlen=max_seqlen,
+            cos_emb=cos_emb,
+            sin_emb=sin_emb,
+        )
+        return (hidden_states_out,)
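Both branches of the new wrapper keep the original one-tuple return contract, so callers that unpack layer_outputs[0] work unchanged whether they pass the legacy 3-D input or the packed 2-D one. A toy stand-in for the dispatch (not the PR's layer, which also needs attention masks and rotary tables) makes the shape contract concrete:

    import paddle

    def _impl(hidden_states_2d):
        # Stand-in for SiglipEncoderLayer._forward_impl: operates purely on [seq, dim].
        return hidden_states_2d * 1.0

    def forward_like(hidden_states):
        # Mirrors the new dispatch: flatten a [1, seq, dim] input, run the 2-D impl,
        # then restore the leading batch axis so callers still receive a one-tuple.
        if hidden_states.dim() == 3 and hidden_states.shape[0] == 1:
            return (_impl(hidden_states[0]).unsqueeze(0),)
        return (_impl(hidden_states),)

    print(forward_like(paddle.randn([1, 256, 1152]))[0].shape)  # [1, 256, 1152]
    print(forward_like(paddle.randn([256, 1152]))[0].shape)     # [256, 1152]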
 class SigLIPRotaryEmbedding(nn.Layer):
@@ -677,7 +707,6 @@ def forward(
             end = cu_seqlens[i + 1]
             tensor = last_hidden_state[:, start:end, :].squeeze(0)
             sample_hidden_state.append(tensor)
-
         return sample_hidden_state
@@ -37,27 +37,34 @@ def rotate_half(x):


 def apply_rotary_pos_emb_vision(x, cos, sin):
-    orig_dtype = x.dtype
-    x = x.astype("float32")
+    assert x.dtype == paddle.float32, f"expected float32, got {x.dtype}"

Review comment (❓ question): Suggest an explicit exception instead of the assert:

    if x.dtype != paddle.float32:
        raise TypeError(f"apply_rotary_pos_emb_vision: expected float32, got {x.dtype}")

     x_embed = (x * cos) + (rotate_half(x) * sin)
-    return x_embed.astype(orig_dtype)
+    return x_embed


 def native_neox_rope_embedding(qkv, cos, sin, num_heads):
-    B, seq_length, D = qkv.shape
-    if seq_length == -1:
-        _, seq_length, _ = paddle.shape(qkv)
-    qkv = qkv.reshape(
-        [
-            seq_length,
-            3,
-            num_heads,
-            -1,
-        ]
-    ).transpose(perm=[1, 0, 2, 3])
-    q, k, v = qkv.unbind(axis=0)
+    if qkv.dim() == 3:
+        B, seq_length, D = qkv.shape
+        if seq_length == -1:
+            _, seq_length, _ = paddle.shape(qkv)
+        token_count = B * seq_length
+    else:
+        token_count, D = qkv.shape
+        if token_count == -1:
+            token_count, _ = paddle.shape(qkv)
+    qkv = qkv.reshape([token_count, 3, num_heads, -1])
+    q_dtype = qkv.dtype
+    if q_dtype != paddle.float32:
+        qk = qkv[:, :2].astype("float32")
+        q, k = qk[:, 0], qk[:, 1]
+    else:
+        q, k = qkv[:, 0], qkv[:, 1]
+    v = qkv[:, 2]
     q = apply_rotary_pos_emb_vision(q, cos, sin)
     k = apply_rotary_pos_emb_vision(k, cos, sin)
+    if q.dtype != q_dtype:
+        q = q.astype(q_dtype)
+        k = k.astype(q_dtype)
     return q, k, v
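A rough usage sketch of the packed (2-D) path, to make the expected shapes and dtype round-trip concrete. The head count, head size, and the identity cos/sin tables below are illustrative stand-ins for what SigLIPRotaryEmbedding would produce, not values from the PR:

    import paddle

    num_heads, head_dim, tokens = 16, 72, 1024
    qkv = paddle.randn([tokens, 3 * num_heads * head_dim]).astype("bfloat16")

    # Identity rotary tables, broadcastable against [tokens, num_heads, head_dim].
    cos = paddle.ones([tokens, 1, head_dim], dtype="float32")
    sin = paddle.zeros([tokens, 1, head_dim], dtype="float32")

    q, k, v = native_neox_rope_embedding(qkv, cos, sin, num_heads)
    # q and k are rotated in float32 and cast back to the input dtype; v is untouched.
    print(q.shape, k.shape, v.shape)  # [1024, 16, 72] for each
    print(q.dtype, v.dtype)           # both keep the input dtype (bfloat16 here)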
Review comment (❓ question): The line assert h % m1 == 0 and w % m2 == 0 is a runtime check on the grid shape, but it is silently skipped when Python runs with -O. Suggest changing it to an explicit exception:
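(The suggested replacement is cut off in this view; following the pattern of the earlier comments, it would presumably be an explicit check along these lines — the error message wording is illustrative:)

    # Hypothetical completion of the reviewer's suggestion; message text is an assumption.
    if h % m1 != 0 or w % m2 != 0:
        raise ValueError(f"invalid grid shape: expected h % {m1} == 0 and w % {m2} == 0, got h={h}, w={w}")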