Advertisement
Guest User

Untitled

a guest
Mar 21st, 2019
49
0
Never
Not a member of Pastebin yet? Sign up — it unlocks many cool features!
text 0.77 KB | None | 0 0
import os
import subprocess
import time

import torch
from tqdm import tqdm
  5.  
  6. # declare which gpu device to use
  7. cuda_device = '0'
  8.  
  9. def check_mem(cuda_device):
  10. devices_info = os.popen('"/usr/bin/nvidia-smi" --query-gpu=memory.total,memory.used --format=csv,nounits,noheader').read().strip().split("\n")
  11. total, used = devices_info[int(cuda_device)].split(',')
  12. return total,used
  13.  
  14. def occumpy_mem(cuda_device):
  15. total, used = check_mem(cuda_device)
  16. total = int(total)
  17. used = int(used)
  18. max_mem = int(total * 0.9)
  19. block_mem = max_mem - used
  20. x = torch.cuda.FloatTensor(256,1024,block_mem)
  21. del x
  22.  
  23. if __name__ == '__main__':
  24. os.environ["CUDA_VISIBLE_DEVICES"] = cuda_device
  25. occumpy_mem(cuda_device)
  26. for _ in tqdm(range(60)):
  27. time.sleep(1)
  28. print('Done')
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement