Data hosted with ♥ by Pastebin.com - Download Raw - See Original
c
c flops.f
c
c Author:
c Yuri Sbitnev <yuri@linux-ekb.info>
c
c Copyright (c) 2008-2009 Yuri Sbitnev
c
c This program is free software; you can redistribute it and/or modify
c it under the terms of the GNU General Public License as published by
c the Free Software Foundation; either version 2 of the License, or
c (at your option) any later version.
c
c This program is distributed in the hope that it will be useful,
c but WITHOUT ANY WARRANTY; without even the implied warranty of
c MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
c GNU General Public License for more details.
c
c You should have received a copy of the GNU General Public License
c along with this program; if not, write to the Free Software
c Foundation, Inc., 59 Temple Place, Suite 330, Boston,
c MA 02111-1307 USA


  25. program calc_mflops
  26. include 'mpif.h'
  27. integer i, j, n
  28. double precision w, gsum, sum
  29. double precision v
  30. integer np, myid, ierr, niter, status(MPI_STATUS_SIZE)
  31. real*8 time, amflops, amflops1, time1, time2, dsecnd
  32. integer mflops, mflops1
  33. c Initialize MPI. Find number of processors.
  34. call MPI_INIT( ierr )
  35. call MPI_COMM_RANK( MPI_COMM_WORLD, myid, ierr )
  36. call MPI_COMM_SIZE( MPI_COMM_WORLD, np, ierr )
  37. c Zero process determines the number of points.
  38. if ( myid .eq. 0 ) then
  39. n = 200000000
  40. endif
  41.  
  42. time1 = MPI_Wtime()
  43.  
  44. c Send number of point from zero process to all other processes
  45. call MPI_BCAST(n, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
  46. c Calculate partial sum
  47. w = 1.0 / n
  48.  
  49. do j = 1, 4
  50. sum = 0.0d0
  51. do i = myid+1, n, np
  52. v = (i - 0.5d0 ) * w
  53. v = 4.0d0 / (1.0d0 + v * v)
  54. sum = sum + v
  55. end do
  56. end do
  57. c Summarize the partial sums and store it in zero process
  58. call MPI_REDUCE(sum, gsum, 1, MPI_DOUBLE_PRECISION,
  59. $ MPI_SUM, 0, MPI_COMM_WORLD, ierr)
  60.  
  61. time2 = MPI_Wtime()
  62. time = (time2 - time1) / 4
  63.  
  64. niter = 0
  65. do i = myid+1, n, np
  66. niter = niter + 1
  67. end do
  68.  
  69. mflops1 = 9 * niter / (1000000.0 * time)
  70.  
  71. c Zero process prints cluster benchmark results
  72. if (myid .eq. 0) then
  73. mflops = 9 * n / (1000000.0 * time)
  74. print *, ' '
  75. print '(A)', ' HPC Test ----------------------------------------'
  76. print '(A,I2.1)', ' Quantity of processors = ', np
  77. print '(A,F6.2,A)',
  78. $' Calculation time = ', time, ' seconds'
  79. print '(A,I6.1,A)',
  80. $' Cluster speed = ', mflops, ' MFLOPS'
  81. print '(A)', ' -------------------------------------------------'
  82. print '(A,I2.2,A,I6.1,A)',
  83. $' Cluster node N',0,' speed = ', mflops1, ' MFLOPS'
  84. c Collect and print benchmark results from individual processes
  85. do i = 1, np-1
  86. CALL MPI_RECV(mflops1, 1, MPI_REAL8, i, 0,
  87. $ MPI_COMM_WORLD, status, ierr)
  88. print '(A,I2.2,A,I6.1,A)',
  89. $' Cluster node N', i, ' speed = ', mflops1, ' MFLOPS'
  90. end do
  91. print '(A)', ' -------------------------------------------------'
  92. print *, ' '
  93. else
  94. c Send local process benchmark result to zero process
  95. call MPI_SEND(mflops1, 1, MPI_REAL8, 0, 0,
  96. $ MPI_COMM_WORLD, ierr)
  97. endif
  98.  
  99. c Close MPI
  100. call MPI_FINALIZE(ierr)
  101. end