import torch
import torch.distributed as dist
import torch.multiprocessing as mp


backend = "nccl"
MiB = 1024 * 1024


def worker(rank):
    # Repeatedly broadcast tensors of increasing size from rank 1 to all ranks.
    for size in [25, 30, 40]:
        for itr in range(1000):
            x = torch.randn(size * MiB, device='cuda')
            dist.broadcast(x, src=1, async_op=False)
            del x


def main(rank, init_method, world_size):
    # Bind each spawned process to its own GPU before joining the process group.
    torch.cuda.set_device(rank)
    dist.init_process_group(backend, init_method, rank=rank, world_size=world_size)
    worker(rank)


if __name__ == '__main__':
    init_method = 'tcp://127.0.0.1:23123'
    world_size = 2
    mp.spawn(main, (init_method, world_size), nprocs=world_size)
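
For comparison, a minimal sketch of the same broadcast issued in non-blocking form: with async_op=True the call returns a work handle instead of blocking, and work.wait() blocks until the collective completes. It assumes the process group has already been initialized as in main() above; the helper name broadcast_nonblocking is only illustrative.

import torch
import torch.distributed as dist

MiB = 1024 * 1024

def broadcast_nonblocking():
    # Assumes dist.init_process_group(...) has already run in this process.
    x = torch.randn(25 * MiB, device='cuda')
    work = dist.broadcast(x, src=1, async_op=True)  # returns immediately with a work handle
    # ...independent computation could overlap with the transfer here...
    work.wait()  # block until the broadcast has completed on this rank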