Skip to content

Commit 22cc01b

Browse files
authored
unbreak gptq int4 test and add to CI (#4223)
Update [ghstack-poisoned]
1 parent a302c10 commit 22cc01b

3 files changed

Lines changed: 4 additions & 3 deletions

File tree

.github/workflows/1xH100_tests.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -60,3 +60,4 @@ jobs:
60 60
pytest test/prototype/moe_training/test_fp8_grouped_mm.py --verbose -s
61 61
pytest test/prototype/moe_training/test_mxfp8_grouped_mm.py --verbose -s
62 62
pytest test/prototype/moe_training/test_training.py --verbose -s
63+
pytest test/prototype/gptq/ --verbose -s

test/prototype/gptq/test_gptqv2.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -429,7 +429,7 @@ def test_gptq_quantize_function(self, base_config):
429429
id="int4",
430430
marks=pytest.mark.skipif(
431431
not _is_mslk_available(),
432-
reason="fbgemm_gpu not available",
432+
reason="mslk not available",
433433
),
434434
),
435435
pytest.param(

torchao/prototype/gptq/api.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313
import torch.nn as nn
1414

1515
try:
16-
from mslk.quantize import int4_row_quantize_zp, pack_int4
16+
from mslk.quantize.shuffle import int4_row_quantize_zp, pack_int4
1717
except:
1818
int4_row_quantize_zp = None
1919
pack_int4 = None
@@ -71,7 +71,7 @@ def __post_init__(self):
7171
if isinstance(self.base_config, Int4WeightOnlyConfig):
7272
if int4_row_quantize_zp is None:
7373
raise ValueError(
74-
"fbgemm_gpu is not installed. Please install fbgemm_gpu to use int4 quantization."
74+
"mslk is not installed. Please install mslk to use int4 quantization."
7575
)
7676

7777

0 commit comments

Comments (0)