import torch
import torch.distributed as dist


def init_process():
    # Initialize the default process group with the NCCL backend and bind
    # this process to the GPU that matches its rank.
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(dist.get_rank())


def example_broadcast():
    # Rank 0 holds the data to share; every other rank starts with zeros.
    if dist.get_rank() == 0:
        tensor = torch.tensor([1, 2, 3, 4], dtype=torch.float32).cuda()
    else:
        tensor = torch.zeros(4, dtype=torch.float32).cuda()

    print(f"Before broadcast on rank {dist.get_rank()}: {tensor}")
    # Copy rank 0's tensor into every rank's buffer, in place.
    dist.broadcast(tensor, src=0)
    print(f"After broadcast on rank {dist.get_rank()}: {tensor}")


init_process()
example_broadcast()
dist.destroy_process_group()
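
The snippet assumes one process per GPU. A typical way to run it, assuming it is saved as a standalone script (the filename broadcast_example.py is just an illustration), is to launch it with torchrun, which sets the environment variables that init_process_group reads by default: torchrun --nproc_per_node=2 broadcast_example.py. With two processes, rank 1 starts with a zero tensor and ends up with rank 0's values after the broadcast.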