# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD 3-Clause license found in the
# LICENSE file in the root directory of this source tree.

import copy
from collections.abc import Iterable

import torch
import torch.distributed as dist
from torch.distributed._tensor import DTensor

from packaging import version
from torchao.prototype.blockwise_fp8_training.linear import (
    Float8BlockwiseLinear,
    Float8BlockwiseLinearConfig,
)
from torchao.quantization import quantize_
from torchao.testing.training.dtensor_utils import ToyModel
from torchao.utils import is_sm_at_least_90


def get_blockwise_linear_skip_reason(
    *,
    triton_module,
    min_cuda_devices: int,
) -> str | None:
    """Shared module-level gating for Float8BlockwiseLinear distributed tests.

    This is intentionally separate from the lower-level kernel test gating because
    the module swap currently requires SM90+ and the newer scaled_mm/Triton path.
    """
    if not torch.cuda.is_available():
        return "CUDA not available"
    if torch.cuda.device_count() < min_cuda_devices:
        return f"Need at least {min_cuda_devices} CUDA devices"
    if not is_sm_at_least_90():
        return "Float8BlockwiseLinear currently requires CUDA SM90+"
    if version.parse(triton_module.__version__) < version.parse("3.3.0"):
        return "Triton version < 3.3.0"
    return None
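

# Illustrative usage (a sketch, not part of this module's API): a distributed
# test file might gate itself at import time along these lines; `triton` and
# `pytest` are assumed to be importable in that test file.
#
#   import pytest
#   import triton
#
#   skip_reason = get_blockwise_linear_skip_reason(
#       triton_module=triton, min_cuda_devices=2
#   )
#   if skip_reason is not None:
#       pytest.skip(skip_reason, allow_module_level=True)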


def full_tensor(tensor: torch.Tensor) -> torch.Tensor:
    """Materialize a DTensor for parity checks, otherwise return the tensor as-is."""
    return tensor.full_tensor() if isinstance(tensor, DTensor) else tensor


def assert_close(
    actual: torch.Tensor,
    expected: torch.Tensor,
    *,
    atol: float = 2e-2,
    rtol: float = 2e-2,
) -> None:
    """Compare eager tensors and DTensors using a common float32 tolerance path."""
    torch.testing.assert_close(
        full_tensor(actual).float(),
        full_tensor(expected).float(),
        atol=atol,
        rtol=rtol,
    )


def set_blockwise_linear_use_triton(
    model: torch.nn.Module,
    use_triton: bool,
) -> None:
    """Toggle the Triton kernel path on every Float8BlockwiseLinear in the model.

    Raises if no such module is found, which usually means quantize_ was never
    applied to the model.
    """
    num_toggled = 0
    for module in model.modules():
        if isinstance(module, Float8BlockwiseLinear):
            module.use_triton = use_triton
            num_toggled += 1
    if num_toggled == 0:
        raise AssertionError("Expected at least one Float8BlockwiseLinear module")


def broadcast_module(module: torch.nn.Module) -> None:
    """Broadcast all parameters from rank 0 so every rank starts from identical weights."""
    for param in module.parameters():
        # The collective mutates the parameter in place; this is intended to run
        # at init time, before any forward/backward builds an autograd graph.
        dist.broadcast(param, src=0)


def init_toy_model(
    *,
    size: int = 128,
    seed: int = 42,
    device: str | torch.device = "cuda",
    broadcast_weights: bool = False,
) -> torch.nn.Module:
    """Build a seeded bfloat16 ToyModel, optionally syncing its weights from rank 0."""
    torch.manual_seed(seed)
    model = ToyModel(size).to(device=device, dtype=torch.bfloat16)
    if broadcast_weights:
        broadcast_module(model)
    return model


def make_quantized_toy_model_pair(
    *,
    size: int = 128,
    seed: int = 42,
    device: str | torch.device = "cuda",
    use_triton: bool,
    broadcast_weights: bool = False,
) -> tuple[torch.nn.Module, torch.nn.Module]:
    """Build two identical quantized ToyModels: an eager reference and a copy to distribute."""
    ref_model = init_toy_model(
        size=size,
        seed=seed,
        device=device,
        broadcast_weights=broadcast_weights,
    )
    dist_model = copy.deepcopy(ref_model)
    for model in (ref_model, dist_model):
        quantize_(model, Float8BlockwiseLinearConfig())
        set_blockwise_linear_use_triton(model, use_triton)
    return ref_model, dist_model
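

# Sketch of how the pair might be consumed by a tensor-parallel test. The
# submodule names in the plan ("ffn.w1", "ffn.w2", "ffn.out_proj") are an
# assumption based on torchao's ToyModel layout and must match the actual
# module tree.
#
#   from torch.distributed.device_mesh import init_device_mesh
#   from torch.distributed.tensor.parallel import (
#       ColwiseParallel,
#       RowwiseParallel,
#       parallelize_module,
#   )
#
#   mesh = init_device_mesh("cuda", (dist.get_world_size(),))
#   ref_model, dist_model = make_quantized_toy_model_pair(
#       use_triton=False, broadcast_weights=True
#   )
#   parallelize_module(
#       dist_model,
#       mesh,
#       {
#           "ffn.w1": ColwiseParallel(),
#           "ffn.w2": ColwiseParallel(),
#           "ffn.out_proj": RowwiseParallel(),
#       },
#   )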


def get_replicated_local_batch(
    *,
    replica_count: int,
    replica_index: int,
    iter_idx: int,
    size: int = 128,
    device: str | torch.device = "cuda",
) -> tuple[torch.Tensor, torch.Tensor]:
    """Build one global batch and hand each replica its deterministic local slice.

    TP peers should see the same sample, while different data-parallel replicas
    should see different samples. Broadcasting from rank 0 keeps the reference
    and distributed models aligned across all ranks.
    """
    torch.manual_seed(100 + iter_idx)
    global_input = torch.randn(
        replica_count,
        1,
        size,
        size,
        device=device,
        dtype=torch.bfloat16,
    )
    global_target = torch.randn_like(global_input)
    # The seed above already makes every rank generate the same batch; the
    # broadcasts additionally guard against any RNG divergence across ranks.
    dist.broadcast(global_input, src=0)
    dist.broadcast(global_target, src=0)
    return (
        global_input[replica_index].contiguous(),
        global_target[replica_index].contiguous(),
    )
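

# Sketch of the intended call pattern inside a data-parallel or 2D test loop.
# The names dp_mesh, dist_model, and num_iters are illustrative, not part of
# this module; dp_mesh would be the data-parallel dimension of a DeviceMesh.
#
#   for iter_idx in range(num_iters):
#       x, target = get_replicated_local_batch(
#           replica_count=dp_mesh.size(),
#           replica_index=dp_mesh.get_local_rank(),
#           iter_idx=iter_idx,
#       )
#       loss = torch.nn.functional.mse_loss(dist_model(x), target)
#       loss.backward()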


def assert_parameters_are_dtensors(parameters: Iterable[torch.Tensor]) -> None:
    """Check that every parameter was actually sharded/replicated into a DTensor."""
    for param in parameters:
        assert isinstance(param, DTensor)


def allreduce_reference_grads(
    model: torch.nn.Module,
    *,
    world_size: int,
    group=None,
) -> None:
    """Average the reference model's gradients across ranks, mimicking data parallelism.

    When a process group is passed, world_size should be that group's size.
    """
    for param in model.parameters():
        assert param.grad is not None
        dist.all_reduce(param.grad, group=group)
        param.grad.div_(world_size)


def assert_dtensor_parameter_grads_match(
    ref_parameters: Iterable[torch.nn.Parameter],
    dist_parameters: Iterable[torch.nn.Parameter],
) -> None:
    """Check gradient parity between the eager reference and the DTensor model."""
    for ref_param, dist_param in zip(ref_parameters, dist_parameters, strict=True):
        assert ref_param.grad is not None
        assert dist_param.grad is not None
        assert isinstance(dist_param, DTensor)
        assert isinstance(dist_param.grad, DTensor)
        assert_close(dist_param.grad, ref_param.grad)


def assert_dtensor_parameter_values_match(
    ref_parameters: Iterable[torch.nn.Parameter],
    dist_parameters: Iterable[torch.nn.Parameter],
) -> None:
    """Check weight parity between the eager reference and the DTensor model."""
    for ref_param, dist_param in zip(ref_parameters, dist_parameters, strict=True):
        assert isinstance(dist_param, DTensor)
        assert_close(dist_param, ref_param)
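

# End-to-end sketch tying the helpers together for a single parity step. The
# parallelization of dist_model is elided (see the sketch after
# make_quantized_toy_model_pair above); everything else uses only this module.
#
#   ref_model, dist_model = make_quantized_toy_model_pair(
#       use_triton=False, broadcast_weights=True
#   )
#   # ... parallelize dist_model onto a device mesh here ...
#   assert_parameters_are_dtensors(dist_model.parameters())
#
#   x, target = get_replicated_local_batch(
#       replica_count=1, replica_index=0, iter_idx=0
#   )
#   for model in (ref_model, dist_model):
#       torch.nn.functional.mse_loss(model(x), target).backward()
#
#   # With pure TP every rank already holds identical reference grads, so the
#   # average is a no-op there; it matters for 2D setups with a DP dimension.
#   allreduce_reference_grads(ref_model, world_size=dist.get_world_size())
#   assert_dtensor_parameter_grads_match(
#       ref_model.parameters(), dist_model.parameters()
#   )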