
Commit 7757403

claude authored and committed
Use Float8TrainingOpConfig instead of removed FP8GroupedMMConfig alias
FP8GroupedMMConfig was a temporary backwards-compatibility alias in torchao that has since been removed (pytorch/ao#4069). Use the canonical Float8TrainingOpConfig name instead.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
1 parent fca8ab7 commit 7757403

1 file changed

Lines changed: 2 additions & 2 deletions

File tree

torchtitan/components/quantization/float8.py

@@ -274,7 +274,7 @@ def convert(self, model: nn.Module):
         from torchao.quantization.quant_api import quantize_
 
         try:
-            from torchao.prototype.moe_training.config import FP8GroupedMMConfig
+            from torchao.prototype.moe_training.config import Float8TrainingOpConfig
         except ImportError as e:
             raise ImportError(
                 "torchao installation does not have MoE training support. Please install torchao nightly build."
@@ -293,7 +293,7 @@ def moe_module_filter_fn(mod: nn.Module, cur_fqn: str) -> bool:
             model, ["_init_mean", "_init_std"], nn_module_cls=nn.Linear
         )
 
-        config = FP8GroupedMMConfig()
+        config = Float8TrainingOpConfig()
         quantize_(model, config=config, filter_fn=moe_module_filter_fn)
 
         # Re-inject Linear protocol and re-attach attrs
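
For reference, below is a minimal sketch of what this code path reads like after the rename. It only assumes the names visible in the diff above (quantize_, Float8TrainingOpConfig, and a filter_fn argument); the helper function name and the filter logic are illustrative placeholders, not torchtitan's actual implementation.

import torch.nn as nn
from torchao.quantization.quant_api import quantize_

try:
    # Canonical name; the temporary FP8GroupedMMConfig alias was removed in pytorch/ao#4069.
    from torchao.prototype.moe_training.config import Float8TrainingOpConfig
except ImportError as e:
    raise ImportError(
        "torchao installation does not have MoE training support. "
        "Please install torchao nightly build."
    ) from e


def convert_moe_to_float8(model: nn.Module) -> nn.Module:
    """Sketch: convert the selected modules' grouped MMs to float8 training ops."""

    def moe_module_filter_fn(mod: nn.Module, cur_fqn: str) -> bool:
        # Placeholder filter: torchtitan's real filter targets its MoE expert modules.
        return "experts" in cur_fqn

    config = Float8TrainingOpConfig()
    quantize_(model, config=config, filter_fn=moe_module_filter_fn)
    return model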
