Skip to content

Commit 79159f2

Browse files
authored
fix ruff on main branch (#4207)
Summary: broken by #4200, fixing ``` ruff format ruff check . --fix ``` Test Plan: CI
1 parent 7dd8be5 commit 79159f2

2 files changed

Lines changed: 5 additions & 2 deletions

File tree

test/quantization/quantize_/workflows/float8/test_float8_tensor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@
88
import unittest
99
from contextlib import nullcontext
1010
from typing import Tuple
11+
from unittest.mock import patch
1112

1213
import torch
1314
from torch._inductor.utils import run_and_get_code
@@ -38,7 +39,6 @@
3839
is_sm_at_least_100,
3940
torch_version_at_least,
4041
)
41-
from unittest.mock import patch
4242

4343
# Needed since changing args to function causes recompiles
4444
torch._dynamo.config.cache_size_limit = 128

torchao/float8/inference.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -296,7 +296,10 @@ def _check_hardware_support(
296296

297297
if is_per_tensor or is_per_row:
298298
assert torch.xpu.is_available() or (
299-
torch.cuda.is_available() and is_sm_at_least_89() or is_MI300() or is_MI350()
299+
torch.cuda.is_available()
300+
and is_sm_at_least_89()
301+
or is_MI300()
302+
or is_MI350()
300303
), (
301304
"Float8 dynamic quantization requires CUDA compute capability ≥8.9 or MI300+ or XPU."
302305
)

0 commit comments

Comments
 (0)