
Commit e1eb45d

[Bugfix] Fix precommit - line too long in pixtral.py (vllm-project#14960)
Signed-off-by: Tyler Michael Smith <[email protected]>
Co-authored-by: DarkLight1337 <[email protected]>
1 parent 89fca67

2 files changed: +6 −5 lines changed

requirements/test.txt (+1 −1)

@@ -235,7 +235,7 @@ mbstrdecoder==1.1.3
     # typepy
 mdurl==0.1.2
     # via markdown-it-py
-mistral-common==1.5.1
+mistral-common==1.5.4
     # via -r requirements/test.in
 more-itertools==10.5.0
     # via lm-eval

vllm/model_executor/models/pixtral.py (+5 −4)

@@ -73,7 +73,7 @@ class PixtralImagePixelInputs(TypedDict):
     """
     A boolean mask indicating which image embeddings correspond
     to patch tokens.
-
+
     Shape: `(batch_size, num_images, num_embeds)`
     """

@@ -849,10 +849,10 @@ def forward(
     ) -> torch.Tensor:
         """
         Args:
-            images: list of N_img images of variable sizes,
+            images: list of N_img images of variable sizes,
                 each of shape (C, H, W)
         Returns:
-            image_features: tensor of token features for
+            image_features: tensor of token features for
                 all tokens of all images of shape (N_toks, D)
         """
         # pass images through initial convolution independently

@@ -935,7 +935,8 @@ def forward(self, x: torch.Tensor,
         # x is (N, vision_encoder_dim)
         x = self.permute(x, image_sizes)

-        # x is (N / spatial_merge_size ** 2, vision_encoder_dim * spatial_merge_size ** 2)
+        # x is (N / spatial_merge_size ** 2,
+        #  vision_encoder_dim * spatial_merge_size ** 2)
         x = self.merging_layer(x)

         # x is (N / spatial_merge_size ** 2, vision_encoder_dim)
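
For context on the comment that this fix reflows, here is a minimal sketch of the shape arithmetic it describes. The dimension values are assumptions chosen for illustration, a bare reshape stands in for the spatial regrouping that self.permute performs with image_sizes, and a plain torch.nn.Linear stands in for self.merging_layer; it is not the model's actual implementation.

import torch

# Assumed values, for illustration only.
vision_encoder_dim = 1024
spatial_merge_size = 2
N = 4096  # patch-token count; must be divisible by spatial_merge_size ** 2

x = torch.randn(N, vision_encoder_dim)  # x is (N, vision_encoder_dim)

# The real model first calls self.permute(x, image_sizes) so that each
# spatial_merge_size x spatial_merge_size block of neighbouring patches is
# contiguous; the reshape below only reproduces the shape arithmetic, not
# that spatial grouping.
x = x.reshape(N // spatial_merge_size ** 2,
              vision_encoder_dim * spatial_merge_size ** 2)
# x is (N / spatial_merge_size ** 2, vision_encoder_dim * spatial_merge_size ** 2)

# Hypothetical stand-in for self.merging_layer: project the widened features
# back down to the encoder dimension.
merging_layer = torch.nn.Linear(vision_encoder_dim * spatial_merge_size ** 2,
                                vision_encoder_dim)
x = merging_layer(x)
# x is (N / spatial_merge_size ** 2, vision_encoder_dim)
print(x.shape)  # torch.Size([1024, 1024])

In other words, the merge trades token count for feature width: spatial_merge_size ** 2 neighbouring patch embeddings are concatenated into one wider vector, which the merging layer then projects back to vision_encoder_dim.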
