Commit 5c80f0e

just used type: ignore instead
1 parent: f32c061 · commit: 5c80f0e

File tree

1 file changed (+4, -8 lines)

llmfoundry/models/mpt/modeling_mpt.py (+4, -8)

@@ -954,18 +954,14 @@ def forward(
 
         layer_kv_cache_dict = {}
         for b_idx, block in enumerate(self.blocks):
-            # Added some assert statements
-            assert isinstance(block, torch.nn.Module)
-            assert isinstance(block.norm_attn_norm, torch.nn.Module)
-            attn_block = block.norm_attn_norm.attn if self.blocks_fuse_norm_attn_norm else block.attn
-            assert isinstance(attn_block, torch.nn.Module)
+            attn_block = block.norm_attn_norm.attn if self.blocks_fuse_norm_attn_norm else block.attn  # type: ignore
             if attn_block.reuse_kv_layer_idx is not None:  # type: ignore
-                if attn_block.reuse_kv_layer_idx not in layer_kv_cache_dict:
+                if attn_block.reuse_kv_layer_idx not in layer_kv_cache_dict:  # type: ignore
                     raise KeyError(
-                        f'kv cache for layer {block.reuse_kv_layer_idx} not found in {layer_kv_cache_dict=}.',
+                        f'kv cache for layer {block.reuse_kv_layer_idx} not found in {layer_kv_cache_dict=}.',  # type: ignore
                     )
                 prev_layer_key_value = layer_kv_cache_dict[
-                    attn_block.reuse_kv_layer_idx]
+                    attn_block.reuse_kv_layer_idx]  # type: ignore
             else:
                 prev_layer_key_value = None
             if output_hidden_states:
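Context for the change: the parent commit silenced the type checker by narrowing types at runtime with isinstance asserts; this commit removes those asserts and instead suppresses the diagnostics line by line with # type: ignore. The following is a minimal sketch of that trade-off, assuming a mypy/pyright-style checker and using hypothetical stand-in names (Block, attn) rather than the real MPT classes:

import torch

class Block(torch.nn.Module):
    """Stand-in for a transformer block whose `attn` attribute is typed loosely."""

    def __init__(self) -> None:
        super().__init__()
        # The static checker only knows this attribute as `object`.
        self.attn: object = torch.nn.Identity()

block = Block()

# Option A (parent commit): runtime asserts that also narrow the static type.
# After the assert, the checker treats block.attn as torch.nn.Module,
# but the isinstance check runs on every call.
assert isinstance(block.attn, torch.nn.Module)
narrowed_attn = block.attn

# Option B (this commit): leave the code unchanged and silence the diagnostic
# on the offending line only. No runtime cost, but the access is unchecked.
unchecked_attn = block.attn.forward  # type: ignore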
