update all models
zucchini-nlp committed Sep 18, 2025
commit 9ee3f431c32e9e5f6c0888f6b2b0ca3f6c289dd6
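Every hunk in this commit makes the same mechanical change: the bare decorator @check_model_inputs becomes the called, factory form @check_model_inputs(), and a handful of vision encoders additionally pass post_ln_hiddens=False. Calling the decorator is what makes per-model options possible. As a minimal sketch of the pattern (an assumption about the shape, not the actual implementation in transformers.utils):

import functools

def check_model_inputs(post_ln_hiddens: bool = True):
    # Factory form: called with options, returns the actual decorator.
    def decorator(forward_fn):
        @functools.wraps(forward_fn)
        def wrapper(self, *args, **kwargs):
            # A real implementation would validate and record forward() inputs
            # (e.g. output_hidden_states, output_attentions) around this call;
            # this sketch just forwards the call unchanged.
            return forward_fn(self, *args, **kwargs)

        # Expose the option so hooks or tests could consult it.
        wrapper._post_ln_hiddens = post_ln_hiddens
        return wrapper

    return decorator

With this shape, @check_model_inputs() and @check_model_inputs(post_ln_hiddens=False) both work, whereas the bare @check_model_inputs would bind the forward function itself to the post_ln_hiddens parameter and break.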
4 changes: 2 additions & 2 deletions src/transformers/models/aimv2/modeling_aimv2.py
@@ -444,7 +444,7 @@ def get_input_embeddings(self) -> nn.Module:
return self.embeddings.patch_embed

@deprecate_kwarg("attention_mask", version="v4.58.0")
-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
@@ -520,7 +520,7 @@ def get_input_embeddings(self) -> nn.Module:
def set_input_embeddings(self, value):
self.embeddings.token_embedding = value

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
4 changes: 2 additions & 2 deletions src/transformers/models/aimv2/modular_aimv2.py
@@ -488,7 +488,7 @@ def get_input_embeddings(self) -> nn.Module:
return self.embeddings.patch_embed

@deprecate_kwarg("attention_mask", version="v4.58.0")
-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
@auto_docstring
def forward(
self,
@@ -564,7 +564,7 @@ def get_input_embeddings(self) -> nn.Module:
def set_input_embeddings(self, value):
self.embeddings.token_embedding = value

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
@auto_docstring
def forward(
self,
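The post_ln_hiddens=False argument appears only on vision towers that apply a final post-layernorm (here in modular_aimv2.py; BLIP, BLIP-2, DeiT, DINOv2, DPT, and DINOv3 below). The flag's semantics are not shown in this diff; judging from the name alone, it plausibly tells the checker that recorded hidden states are taken before that final layer norm, so the last entry should not be expected to match the post-LN output. A toy illustration of a check such a flag could gate, under that assumption:

import torch

def check_last_hidden_state(hidden_states, sequence_output, post_ln_hiddens=True):
    # Hypothetical check: only when the recorded hidden states are post-LN can
    # the last entry be compared directly against the final sequence output.
    if post_ln_hiddens:
        assert torch.equal(hidden_states[-1], sequence_output)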
2 changes: 1 addition & 1 deletion src/transformers/models/apertus/modeling_apertus.py
@@ -339,7 +339,7 @@ def __init__(self, config: ApertusConfig):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/arcee/modeling_arcee.py
@@ -344,7 +344,7 @@ def __init__(self, config: ArceeConfig):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/aria/modeling_aria.py
@@ -721,7 +721,7 @@ def __init__(self, config: AriaTextConfig):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
@@ -366,7 +366,7 @@ class PreTrainedModel
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/aya_vision/modeling_aya_vision.py
@@ -263,7 +263,7 @@ def get_placeholder_mask(
)
return special_image_mask

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/aya_vision/modular_aya_vision.py
@@ -162,7 +162,7 @@ def get_image_features(
image_features = self.multi_modal_projector(selected_image_feature)
return image_features

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/bitnet/modeling_bitnet.py
@@ -343,7 +343,7 @@ def __init__(self, config: BitNetConfig):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/blip/modeling_blip.py
@@ -508,7 +508,7 @@ def __init__(self, config: BlipVisionConfig):

self.post_init()

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
@auto_docstring
def forward(
self,
4 changes: 2 additions & 2 deletions src/transformers/models/blip_2/modeling_blip_2.py
@@ -495,7 +495,7 @@ def __init__(self, config: Blip2VisionConfig):

self.post_init()

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
@auto_docstring
def forward(
self,
@@ -1007,7 +1007,7 @@ def get_extended_attention_mask(
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/cohere/modeling_cohere.py
@@ -376,7 +376,7 @@ def __init__(self, config: CohereConfig):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/cohere2/modeling_cohere2.py
@@ -351,7 +351,7 @@ def __init__(self, config: Cohere2Config):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
@@ -213,7 +213,7 @@ def get_placeholder_mask(
)
return special_image_mask

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
@@ -306,7 +306,7 @@ def vision_tower(self):
def multi_modal_projector(self):
return self.model.multi_modal_projector

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
@@ -107,7 +107,7 @@ def get_image_features(self, pixel_values: torch.FloatTensor):
image_features = self.multi_modal_projector(selected_image_feature)
return image_features

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
@@ -160,7 +160,7 @@ class Cohere2VisionForConditionalGeneration(AyaVisionForConditionalGeneration):
def get_image_features(self, pixel_values: torch.FloatTensor):
return self.model.get_image_features(pixel_values=pixel_values)

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
4 changes: 2 additions & 2 deletions src/transformers/models/csm/modeling_csm.py
@@ -409,7 +409,7 @@ def __init__(self, config):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
@@ -662,7 +662,7 @@ def __init__(self, config):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
4 changes: 2 additions & 2 deletions src/transformers/models/csm/modular_csm.py
@@ -156,7 +156,7 @@ def __init__(self, config):
self.embed_tokens = nn.Embedding((config.num_codebooks * config.vocab_size), config.backbone_hidden_size)
self.inputs_embeds_projector = nn.Linear(config.backbone_hidden_size, config.hidden_size, bias=False)

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
@@ -395,7 +395,7 @@ def __init__(self, config):
super().__init__(config)
self.embed_tokens = CsmBackboneModelEmbeddings(config)

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(self, **super_kwargs):
r"""
@@ -491,7 +491,7 @@ def __init__(self, config: DeepseekV2Config):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
@@ -539,7 +539,7 @@ def __init__(self, config: DeepseekV3Config):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/deit/modeling_deit.py
@@ -441,7 +441,7 @@ class PreTrainedModel
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/diffllama/modeling_diffllama.py
@@ -608,7 +608,7 @@ def __init__(self, config: DiffLlamaConfig):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
4 changes: 2 additions & 2 deletions src/transformers/models/dinov2/modeling_dinov2.py
@@ -501,7 +501,7 @@ class PreTrainedModel
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
@auto_docstring
def forward(
self,
@@ -624,7 +624,7 @@ def __init__(self, config):
def get_input_embeddings(self) -> Dinov2PatchEmbeddings:
return self.embeddings.patch_embeddings

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self, pixel_values: torch.Tensor, output_hidden_states: Optional[bool] = None, **kwargs
@@ -518,7 +518,7 @@ class PreTrainedModel
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
@auto_docstring
def forward(
self,
@@ -644,7 +644,7 @@ def __init__(self, config):
def get_input_embeddings(self) -> Dinov2WithRegistersPatchEmbeddings:
return self.embeddings.patch_embeddings

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/dinov3_vit/modeling_dinov3_vit.py
@@ -499,7 +499,7 @@ def __init__(self, config: DINOv3ViTConfig):
def get_input_embeddings(self):
return self.embeddings.patch_embeddings

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/dinov3_vit/modular_dinov3_vit.py
@@ -389,7 +389,7 @@ def __init__(self, config: DINOv3ViTConfig):
def get_input_embeddings(self):
return self.embeddings.patch_embeddings

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/doge/modeling_doge.py
@@ -530,7 +530,7 @@ def __init__(self, config: DogeConfig):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/dots1/modeling_dots1.py
@@ -454,7 +454,7 @@ def __init__(self, config: Dots1Config):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/dpt/modeling_dpt.py
@@ -808,7 +808,7 @@ class PreTrainedModel
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)

-    @check_model_inputs
+    @check_model_inputs(post_ln_hiddens=False)
@auto_docstring
def forward(
self,
@@ -680,7 +680,7 @@ def __init__(self, config: EfficientLoFTRConfig):

self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/emu3/modeling_emu3.py
@@ -1166,7 -1166,7 @@ def __init__(self, config: Emu3Config):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/eomt/modeling_eomt.py
@@ -1087,7 -1087,7 @@ def get_loss_dict(
def get_loss(self, loss_dict: dict[str, Tensor]) -> Tensor:
return sum(loss_dict.values())

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/eomt/modular_eomt.py
@@ -492,7 +492,7 @@ def _disable_attention_mask(attn_mask, prob, num_query_tokens, encoder_start_tok

return attn_mask

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
2 changes: 1 addition & 1 deletion src/transformers/models/ernie4_5/modeling_ernie4_5.py
@@ -342,7 +342,7 @@ def __init__(self, config: Ernie4_5Config):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
@@ -510,7 +510,7 @@ def __init__(self, config: Ernie4_5_MoeConfig):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,
@@ -248,7 +248,7 @@ def __init__(self, config: Ernie4_5_MoeConfig):
# Initialize weights and apply final processing
self.post_init()

-    @check_model_inputs
+    @check_model_inputs()
@auto_docstring
def forward(
self,