/*
 * OSU One Sided MPI_Put Bandwidth test v2.2
 */

/*
 * Copyright (C) 2003-2006 the Network-Based Computing Laboratory
 * (NBCL), The Ohio State University.
 *
 */

#define GPU 0

#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "mpi.h"
#include "cuda.h"
#include "cudatest.h"

#define MYBUFSIZE (150000000) /* ~= 150M Bytes */
#define MAX_REQ_NUM 100

/* Note we have an upper limit for buffer size, so be extremely careful
 * if you want to change the loop size or warm-up size */
#define MAX_SIZE (1<<22)
#define WINDOW_SIZE (32)

char s_buf1[MAX_SIZE + 4096];
char r_buf1[MYBUFSIZE + 4096];
MPI_Request request[MAX_REQ_NUM];
CUdeviceptr dptr_send, dptr_recv, gpu_send_buf, gpu_recv_buf;
static int checkbuf = 0;

static void usage(void) {
    printf("osu_put_bw_cuda [-c]\n");
}

int main (int argc, char *argv[])
{
    int myid, numprocs, i, j, k;
    int size, page_size;
    char *host_send_buf, *host_recv_buf;
    double t_start = 0.0, t_end = 0.0, t = 0.0;
    int destrank;
    int c;

    MPI_Group comm_group, group;
    MPI_Win win;
    int color;
    MPI_Comm newcomm;

    MPI_Init (&argc, &argv);
    MPI_Comm_size (MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank (MPI_COMM_WORLD, &myid);

    printf("Sleeping for 15 pid=%d\n", (int) getpid());
    sleep(15);

    /* Page-align the static send/receive buffers. */
    page_size = getpagesize ();
    host_send_buf =
        (char *) (((unsigned long) s_buf1 + (page_size - 1)) / page_size *
                  page_size);
    host_recv_buf =
        (char *) (((unsigned long) r_buf1 + (page_size - 1)) / page_size *
                  page_size);
    assert ((host_send_buf != NULL) && (host_recv_buf != NULL));
    assert (MAX_SIZE * WINDOW_SIZE < MYBUFSIZE);

    /* Load up the send buffer in case we plan on checking the results.
       Just store the index in each location. */
    for (i = 0; i < MAX_SIZE >> 2; i++) {
        ((int *)host_send_buf)[i] = i;
    }
    for (i = 0; i < MYBUFSIZE >> 2; i++) {
        ((int *)host_recv_buf)[i] = -1;
    }

    if (myid == 0) {
        fprintf (stdout,
                 "# OSU MPI2 MPI_Put Bandwidth Test (Version 2.2)\n");
        fprintf (stdout, "# Size\t\tBandwidth (MillionBytes/s) \n");
        fflush(stdout);
    }

    /* Ranks 0 and 1 get their own communicator; all other ranks are split off. */
    if ((myid == 0) || (myid == 1)) {
        color = 0;
    } else {
        color = 1;
    }
    MPI_Comm_split(MPI_COMM_WORLD, color, 0, &newcomm);
    MPI_Comm_group (newcomm, &comm_group);

    /* Only ranks 0 and 1 play */
    if ((myid == 0) || (myid == 1)) {
        size = 1;

        /* Window creation and test */
        if (myid == 0) {
            /* Origin side: start an access epoch to rank 1, put, complete. */
            MPI_Win_create (host_recv_buf, size, 1, MPI_INFO_NULL,
                            newcomm, &win);
            destrank = 1;
            MPI_Group_incl (comm_group, 1, &destrank, &group);
            MPI_Win_start (group, 0, win);
            MPI_Put (host_send_buf, size, MPI_CHAR, 1, size, size,
                     MPI_CHAR, win);
            MPI_Win_complete (win);
        } else {
            /* Target side: expose the window to rank 0, then wait. */
            MPI_Win_create (host_recv_buf, size * WINDOW_SIZE, 1, MPI_INFO_NULL,
                            newcomm, &win);
            destrank = 0;
            MPI_Group_incl (comm_group, 1, &destrank, &group);
            MPI_Win_post (group, 0, win);
            MPI_Win_wait (win);
        }

        MPI_Barrier (newcomm);
        MPI_Win_free (&win);
    }
    MPI_Barrier (MPI_COMM_WORLD);
    MPI_Finalize ();
    if (0 == myid) {
        printf("MPITEST_results: PASSED\n");
    }
    return 0;
}
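
Note (not part of the paste): in the listing above, the timing variables t_start/t_end and WINDOW_SIZE are declared but never used, and only a single one-byte MPI_Put is issued, so the bandwidth-measuring loop of the original OSU test appears to have been stripped out. What follows is a minimal, self-contained sketch, assuming only a standard MPI C compiler wrapper (for example mpicc) and a run with at least two ranks, of how such a put-bandwidth loop is typically structured using the same general active-target synchronization (MPI_Win_post / MPI_Win_start / MPI_Win_complete / MPI_Win_wait). It is not the original OSU code; the SKETCH_* names and loop counts are illustrative choices.

/*
 * Illustrative sketch only -- NOT part of the pasted file and NOT the
 * original OSU benchmark loop.  It times SKETCH_WINDOW puts per access
 * epoch, repeated SKETCH_LOOP times, for each message size.  The
 * SKETCH_* constants are arbitrary values chosen for this example.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "mpi.h"

#define SKETCH_MAX_SIZE (1 << 22)
#define SKETCH_WINDOW   32
#define SKETCH_LOOP     20

int main (int argc, char *argv[])
{
    int rank, nprocs, size, i, j, peer;
    char *sbuf, *rbuf;
    double t_start, t_end;
    MPI_Win win;
    MPI_Group world_group, peer_group;

    MPI_Init (&argc, &argv);
    MPI_Comm_rank (MPI_COMM_WORLD, &rank);
    MPI_Comm_size (MPI_COMM_WORLD, &nprocs);
    if (nprocs < 2) {
        if (rank == 0) fprintf (stderr, "run with at least 2 ranks\n");
        MPI_Finalize ();
        return 1;
    }

    sbuf = (char *) malloc (SKETCH_MAX_SIZE);
    rbuf = (char *) malloc ((size_t) SKETCH_MAX_SIZE * SKETCH_WINDOW);
    memset (sbuf, 'a', SKETCH_MAX_SIZE);

    MPI_Comm_group (MPI_COMM_WORLD, &world_group);
    peer = (rank == 0) ? 1 : 0;
    MPI_Group_incl (world_group, 1, &peer, &peer_group);

    for (size = 1; size <= SKETCH_MAX_SIZE; size *= 2) {
        if (rank == 0) {
            /* Origin: SKETCH_WINDOW puts per access epoch, SKETCH_LOOP epochs. */
            MPI_Win_create (rbuf, size, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &win);
            t_start = MPI_Wtime ();
            for (i = 0; i < SKETCH_LOOP; i++) {
                MPI_Win_start (peer_group, 0, win);
                for (j = 0; j < SKETCH_WINDOW; j++) {
                    MPI_Put (sbuf, size, MPI_CHAR, 1,
                             (MPI_Aint) j * size, size, MPI_CHAR, win);
                }
                MPI_Win_complete (win);
            }
            t_end = MPI_Wtime ();
            /* Bandwidth in million bytes per second, as in the header above. */
            printf ("%-10d %.2f\n", size,
                    (double) size * SKETCH_WINDOW * SKETCH_LOOP /
                    (t_end - t_start) / 1.0e6);
        } else if (rank == 1) {
            /* Target: expose the receive window and match each access epoch. */
            MPI_Win_create (rbuf, (MPI_Aint) size * SKETCH_WINDOW, 1,
                            MPI_INFO_NULL, MPI_COMM_WORLD, &win);
            for (i = 0; i < SKETCH_LOOP; i++) {
                MPI_Win_post (peer_group, 0, win);
                MPI_Win_wait (win);
            }
        } else {
            /* Extra ranks only take part in the collective window calls. */
            MPI_Win_create (NULL, 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &win);
        }
        MPI_Win_free (&win);
    }

    MPI_Group_free (&peer_group);
    MPI_Group_free (&world_group);
    free (sbuf);
    free (rbuf);
    MPI_Finalize ();
    return 0;
}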