Skip to content
Merged
Changes from 1 commit
Commits
File filter

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
fix
  • Loading branch information
SunMarc committed Oct 7, 2025
commit a18fe86f3b8d514779072cdff4563a8f7fe3e282
4 changes: 2 additions & 2 deletions src/transformers/quantizers/quantizer_bnb_4bit.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,10 +153,10 @@ def param_needs_quantization(self, model: "PreTrainedModel", param_name: str, **
return True
module, name = get_module_from_name(model, param_name)
return isinstance(module, bnb.nn.Linear4bit) and name != "bias"

def update_param_name(self, param_name: str) -> str:
"""
Update param_name in order to get the module associated with the param.
Update param_name in order to get the module associated with the param.
This is useful for quantized stats like absmax or quant_map, as we need to update the param_name to get the module since they are stored in ...weight.absmax.
"""
if self.pre_quantized:
Expand Down