Skip to content

Commit afd997f

Browse files
zucchini-nlp and AhnJoonSung
authored and committed
Check model inputs - hidden states (huggingface#40994)
* update all models
* fix copies
* skip aria tests
* update other models
* skip should be in test, not tester
* i think this is more descriptive as a name
* find and replace for new models
1 parent 9839b59 commit afd997f

File tree

173 files changed

+393
-367
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

173 files changed

+393
-367
lines changed

‎examples/modular-transformers/modeling_dummy_bert.py‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -657,7 +657,7 @@ class PreTrainedModel
657657
for layer, heads in heads_to_prune.items():
658658
self.encoder.layer[layer].attention.prune_heads(heads)
659659

660-
@check_model_inputs
660+
@check_model_inputs()
661661
@auto_docstring
662662
def forward(
663663
self,

‎examples/modular-transformers/modeling_roberta.py‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -660,7 +660,7 @@ class PreTrainedModel
660660
for layer, heads in heads_to_prune.items():
661661
self.encoder.layer[layer].attention.prune_heads(heads)
662662

663-
@check_model_inputs
663+
@check_model_inputs()
664664
@auto_docstring
665665
def forward(
666666
self,

‎examples/modular-transformers/modeling_super.py‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -323,7 +323,7 @@ def __init__(self, config: SuperConfig):
323323
# Initialize weights and apply final processing
324324
self.post_init()
325325

326-
@check_model_inputs
326+
@check_model_inputs()
327327
@auto_docstring
328328
def forward(
329329
self,

‎src/transformers/models/aimv2/modeling_aimv2.py‎

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -444,7 +444,7 @@ def get_input_embeddings(self) -> nn.Module:
444444
return self.embeddings.patch_embed
445445

446446
@deprecate_kwarg("attention_mask", version="v4.58.0")
447-
@check_model_inputs
447+
@check_model_inputs(tie_last_hidden_states=False)
448448
@auto_docstring
449449
def forward(
450450
self,
@@ -520,7 +520,7 @@ def get_input_embeddings(self) -> nn.Module:
520520
def set_input_embeddings(self, value):
521521
self.embeddings.token_embedding = value
522522

523-
@check_model_inputs
523+
@check_model_inputs(tie_last_hidden_states=False)
524524
@auto_docstring
525525
def forward(
526526
self,

‎src/transformers/models/aimv2/modular_aimv2.py‎

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -488,7 +488,7 @@ def get_input_embeddings(self) -> nn.Module:
488488
return self.embeddings.patch_embed
489489

490490
@deprecate_kwarg("attention_mask", version="v4.58.0")
491-
@check_model_inputs
491+
@check_model_inputs(tie_last_hidden_states=False)
492492
@auto_docstring
493493
def forward(
494494
self,
@@ -564,7 +564,7 @@ def get_input_embeddings(self) -> nn.Module:
564564
def set_input_embeddings(self, value):
565565
self.embeddings.token_embedding = value
566566

567-
@check_model_inputs
567+
@check_model_inputs(tie_last_hidden_states=False)
568568
@auto_docstring
569569
def forward(
570570
self,

‎src/transformers/models/albert/modeling_albert.py‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -457,7 +457,7 @@ def _prune_heads(self, heads_to_prune: dict[int, list[int]]) -> None:
457457
inner_group_idx = int(layer - group_idx * self.config.inner_group_num)
458458
self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)
459459

460-
@check_model_inputs
460+
@check_model_inputs()
461461
@auto_docstring
462462
def forward(
463463
self,

‎src/transformers/models/apertus/modeling_apertus.py‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -339,7 +339,7 @@ def __init__(self, config: ApertusConfig):
339339
# Initialize weights and apply final processing
340340
self.post_init()
341341

342-
@check_model_inputs
342+
@check_model_inputs()
343343
@auto_docstring
344344
def forward(
345345
self,

‎src/transformers/models/arcee/modeling_arcee.py‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -344,7 +344,7 @@ def __init__(self, config: ArceeConfig):
344344
# Initialize weights and apply final processing
345345
self.post_init()
346346

347-
@check_model_inputs
347+
@check_model_inputs()
348348
@auto_docstring
349349
def forward(
350350
self,

‎src/transformers/models/aria/modeling_aria.py‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -669,7 +669,7 @@ def __init__(self, config: AriaTextConfig):
669669
# Initialize weights and apply final processing
670670
self.post_init()
671671

672-
@check_model_inputs
672+
@check_model_inputs()
673673
@auto_docstring
674674
def forward(
675675
self,

‎src/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -363,7 +363,7 @@ class PreTrainedModel
363363
for layer, heads in heads_to_prune.items():
364364
self.encoder.layer[layer].attention.prune_heads(heads)
365365

366-
@check_model_inputs
366+
@check_model_inputs()
367367
@auto_docstring
368368
def forward(
369369
self,

0 commit comments

Comments (0)