Skip to content

Commit 251a7e7

Browse files
rename sym mem buffer file
1 parent a61337f commit 251a7e7

10 files changed

Lines changed: 10 additions & 10 deletions

File tree

benchmarks/prototype/moe_training/mxfp8/ep/syncless/bench_moe_e2e.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@
4747
MXFP8TrainingOpConfig,
4848
MXFP8TrainingRecipe,
4949
)
50-
from torchao.prototype.moe_training.ep.syncless.buffer_manager import (
50+
from torchao.prototype.moe_training.ep.syncless.sym_mem_buffer_manager import (
5151
SymmetricMemoryBufferManager,
5252
)
5353
from torchao.prototype.moe_training.ep.syncless.expert_parallel import (

benchmarks/prototype/moe_training/mxfp8/ep/syncless/bench_token_combine.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@
2828

2929
from benchmarks.utils import profile_fn
3030
from torchao.prototype.moe_training.ep.permute import permute_and_pad
31-
from torchao.prototype.moe_training.ep.syncless.buffer_manager import (
31+
from torchao.prototype.moe_training.ep.syncless.sym_mem_buffer_manager import (
3232
SymmetricMemoryBufferManager,
3333
)
3434
from torchao.prototype.moe_training.ep.syncless.token_combine import token_combine

test/prototype/moe_training/ep/syncless/test_dispatch_with_managed_buffer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -148,7 +148,7 @@ def test_save_activations_in_managed_buffer(self):
148148
total_global_tokens + 128 * experts_per_rank * self.world_size
149149
)
150150

151-
from torchao.prototype.moe_training.ep.syncless.buffer_manager import (
151+
from torchao.prototype.moe_training.ep.syncless.sym_mem_buffer_manager import (
152152
get_buffer_manager,
153153
)
154154

test/prototype/moe_training/ep/syncless/test_token_combine.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ def test_token_combine(self):
9494
total_global_tokens + 128 * experts_per_rank * self.world_size
9595
)
9696

97-
from torchao.prototype.moe_training.ep.syncless.buffer_manager import (
97+
from torchao.prototype.moe_training.ep.syncless.sym_mem_buffer_manager import (
9898
get_buffer_manager,
9999
)
100100

test/prototype/moe_training/ep/syncless/test_token_dispatch.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ def test_mxfp8_a2a(self):
105105
)
106106

107107
# Preallocate symmetric memory buffers
108-
from torchao.prototype.moe_training.ep.syncless.buffer_manager import (
108+
from torchao.prototype.moe_training.ep.syncless.sym_mem_buffer_manager import (
109109
get_buffer_manager,
110110
)
111111

torchao/prototype/moe_training/ep/syncless/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
from torchao.prototype.moe_training.ep.syncless.buffer_manager import (
1+
from torchao.prototype.moe_training.ep.syncless.sym_mem_buffer_manager import (
22
SymmetricMemoryBufferManager, # noqa: F401
33
get_buffer_manager, # noqa: F401
44
)

torchao/prototype/moe_training/ep/syncless/expert_parallel.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@
3232
from torch.distributed.tensor import Shard
3333
from torch.distributed.tensor.parallel import ParallelStyle
3434

35-
from torchao.prototype.moe_training.ep.syncless.buffer_manager import (
35+
from torchao.prototype.moe_training.ep.syncless.sym_mem_buffer_manager import (
3636
SymmetricMemoryBufferManager,
3737
get_buffer_manager,
3838
)

torchao/prototype/moe_training/ep/syncless/buffer_manager.py renamed to torchao/prototype/moe_training/ep/syncless/sym_mem_buffer_manager.py

File renamed without changes.

torchao/prototype/moe_training/ep/syncless/token_combine.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
)
2424
from torch.library import triton_op, wrap_triton
2525

26-
from torchao.prototype.moe_training.ep.syncless.buffer_manager import (
26+
from torchao.prototype.moe_training.ep.syncless.sym_mem_buffer_manager import (
2727
SymmetricMemoryBufferManager,
2828
)
2929

@@ -65,7 +65,7 @@ def forward(
6565
buffer_manager: optional buffer manager for reusing buffers.
6666
token_alignment: expert token group alignment (default 128).
6767
"""
68-
from torchao.prototype.moe_training.ep.syncless.buffer_manager import (
68+
from torchao.prototype.moe_training.ep.syncless.sym_mem_buffer_manager import (
6969
get_buffer_manager,
7070
)
7171

torchao/prototype/moe_training/ep/syncless/token_dispatch.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
import triton.language as tl
55
from torch.library import triton_op, wrap_triton
66

7-
from torchao.prototype.moe_training.ep.syncless.buffer_manager import (
7+
from torchao.prototype.moe_training.ep.syncless.sym_mem_buffer_manager import (
88
SymmetricMemoryBufferManager,
99
get_buffer_manager,
1010
)

0 commit comments

Comments (0)