spanAverage

a guest
May 20th, 2021
167
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Fortran 16.68 KB | None | 0 0
!****************************************************************************************
!****************************************************************************************
! MODULE types_vars
! Sets precision and common constants
MODULE types_vars
  USE mpi
  IMPLICIT NONE

  ! Symbolic names for kind types of 4-, 2- and 1-byte integers:
  INTEGER, PARAMETER :: I4B = SELECTED_INT_KIND(9)
  INTEGER, PARAMETER :: I2B = SELECTED_INT_KIND(4)
  INTEGER, PARAMETER :: I1B = SELECTED_INT_KIND(2)
  ! Symbolic names for kind types of single- and double-precision reals
  INTEGER, PARAMETER :: SP = KIND(1.0)
  INTEGER, PARAMETER :: DP = KIND(1.0D0)
  ! Symbolic names for kind types of single- and double-precision complex
  INTEGER, PARAMETER :: SPC = KIND((1.0,1.0))
  INTEGER, PARAMETER :: DPC = KIND((1.0D0,1.0D0))
  ! Symbolic name for kind type of default logical
  INTEGER, PARAMETER :: LOGIC = KIND(.true.)
  ! Frequently used mathematical constants (with precision to spare)
  REAL(DP), PARAMETER :: zero  = 0.0_dp
  REAL(DP), PARAMETER :: half  = 0.5_dp
  REAL(DP), PARAMETER :: one   = 1.0_dp
  REAL(DP), PARAMETER :: two   = 2.0_dp
  REAL(DP), PARAMETER :: three = 3.0_dp
  REAL(DP), PARAMETER :: four  = 4.0_dp
  REAL(DP), PARAMETER :: pi    = 3.141592653589793238462643383279502884197_dp
  REAL(DP), PARAMETER :: pio2  = 1.57079632679489661923132169163975144209858_dp
  REAL(DP), PARAMETER :: twopi = 6.283185307179586476925286766559005768394_dp

END MODULE types_vars

!****************************************************************************************
!****************************************************************************************
! MODULE variables
! Contains subroutines to set variables, allocate, and deallocate memory
MODULE variables
  USE types_vars
  USE mpi
  IMPLICIT NONE

  ! MPI variables
  INTEGER :: ierr, myid, status(MPI_STATUS_SIZE)
  INTEGER :: nprocs, nprocs_x, nprocs_y, nprocs_z
  INTEGER :: XVEC_TYPE, YCONT_TYPE
  INTEGER :: xsrc, xdst, ysrc, ydst, zsrc, zdst
  INTEGER :: comm3d, comm3d_rank
  LOGICAL :: isperiodic(3), reorder
  INTEGER :: dims(3), coords(3)
  INTEGER, ALLOCATABLE, DIMENSION(:) :: all_coords
  ! Grid
  INTEGER :: nx, ny, nz
  REAL(DP) :: xmin, xmax, ymin, ymax, zmin, zmax
  REAL(DP) :: dx, dy, dz
  REAL(DP), ALLOCATABLE, DIMENSION(:,:,:) :: x, y, z
  ! Buffer (ghost) point widths
  INTEGER :: bufx, bufy, bufz
  ! Solution
  REAL(DP), ALLOCATABLE, DIMENSION(:,:,:) :: u, u_init
  ! Spanwise average: cpu_x(i,k) stores the rank at streamwise position i,
  ! spanwise position k; ranks are integers, so the table is INTEGER
  INTEGER,  ALLOCATABLE, DIMENSION(:,:) :: cpu_x
  REAL(DP), ALLOCATABLE, DIMENSION(:,:) :: u_2d_x_avg

CONTAINS

  !****************************************************************************************
  ! SUBROUTINE memalloc
  ! Allocates memory for needed variables
  SUBROUTINE memalloc
    IMPLICIT NONE

    ! Cartesian grid
    ALLOCATE(x(0-bufx:nx+bufx,0-bufy:ny+bufy,0-bufz:nz+bufz))
    ALLOCATE(y(0-bufx:nx+bufx,0-bufy:ny+bufy,0-bufz:nz+bufz))
    ALLOCATE(z(0-bufx:nx+bufx,0-bufy:ny+bufy,0-bufz:nz+bufz))
    ! Solution variable
    ALLOCATE(u     (0-bufx:nx+bufx,0-bufy:ny+bufy,0-bufz:nz+bufz))
    ALLOCATE(u_init(0:nx,0:ny,0:nz))
    ! Spanwise average
    ALLOCATE(cpu_x(0:nprocs_x-1,0:nprocs_z-1))
    ALLOCATE(u_2d_x_avg(0:nx,0:ny))

  END SUBROUTINE memalloc

  !****************************************************************************************
  ! SUBROUTINE dealloc
  ! Deallocates memory
  SUBROUTINE dealloc
    IMPLICIT NONE

    DEALLOCATE(x)
    DEALLOCATE(y)
    DEALLOCATE(z)
    DEALLOCATE(u)
    DEALLOCATE(u_init)
    DEALLOCATE(cpu_x)
    DEALLOCATE(u_2d_x_avg)

  END SUBROUTINE dealloc

  !****************************************************************************************

END MODULE variables

!****************************************************************************************
!****************************************************************************************
! MODULE io
! Contains subroutines for input and output
MODULE io
  USE types_vars
  USE variables
  USE mpi
  IMPLICIT NONE

CONTAINS

  !****************************************************************************************
  ! SUBROUTINE input
  ! Reads in variables from input file, one value per line (see the sample below)
  SUBROUTINE input
    IMPLICIT NONE

    OPEN(unit=1,file='input.txt')
    READ(1,*) nx
    READ(1,*) ny
    READ(1,*) nz
    READ(1,*) xmin
    READ(1,*) xmax
    READ(1,*) ymin
    READ(1,*) ymax
    READ(1,*) zmin
    READ(1,*) zmax
    READ(1,*) nprocs
    READ(1,*) nprocs_x
    READ(1,*) nprocs_y
    READ(1,*) nprocs_z
    CLOSE(1)

    bufx = 1
    bufy = 1
    bufz = 1

  END SUBROUTINE input

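  ! A sample input.txt matching the READ order above. The values are
  ! illustrative assumptions only (per-process grid sizes and a 2x1x2
  ! decomposition), not values shipped with the original code:
  !
  !   16     nx   (points per process in x, indexed 0:nx)
  !   16     ny
  !   16     nz
  !   0.0    xmin
  !   1.0    xmax
  !   0.0    ymin
  !   1.0    ymax
  !   0.0    zmin
  !   1.0    zmax
  !   4      nprocs   (overwritten by MPI_COMM_SIZE at run time)
  !   2      nprocs_x
  !   1      nprocs_y
  !   2      nprocs_z
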
  !****************************************************************************************
  ! SUBROUTINE tecplot_output
  ! Outputs data in Tecplot ASCII readable format
  SUBROUTINE tecplot_output
    IMPLICIT NONE
    INTEGER :: i, j, k, ii
    CHARACTER(len=32) :: file_name, file_name2

    ! I0 prints the rank with however many digits it needs, so separate
    ! formats for one- and two-digit ranks are not required
    WRITE(file_name, '(A,I0,A)') 'solution/solution_', myid, '.dat'

    OPEN(unit=2, file = file_name, status='replace')
    WRITE(2,'(A)') 'VARIABLES = "x", "y", "z", "u_init"'
    WRITE(2,*) 'ZONE I=',nx+1,', J=',ny+1,', K=',nz+1,', ZONETYPE=ORDERED,'
    WRITE(2,*) 'DATAPACKING=POINT, SOLUTIONTIME=0.0'
    DO k = 0,nz
       DO j = 0,ny
          DO i = 0,nx
             WRITE(2,*) x(i,j,k), y(i,j,k), z(i,j,k), u_init(i,j,k)
          END DO
       END DO
    END DO

    CLOSE(2)

    ! Only ranks on the first spanwise plane hold u_2d_x_avg; each of them
    ! writes its own 2d file (note the loop index is ii, not i)
    DO ii = 0,nprocs_x-1
       IF (myid .EQ. cpu_x(ii,0)) THEN
          WRITE(file_name2, '(A,I0,A)') 'solution/solution2d_', myid, '.dat'

          OPEN(unit=3, file = file_name2, status='replace')
          WRITE(3,'(A)') 'VARIABLES = "x", "y", "u_2d"'
          WRITE(3,*) 'ZONE I=',nx+1,', J=',ny+1,', ZONETYPE=ORDERED,'
          WRITE(3,*) 'DATAPACKING=POINT, SOLUTIONTIME=0.0'
          DO j = 0,ny
             DO i = 0,nx
                WRITE(3,*) x(i,j,0), y(i,j,0), u_2d_x_avg(i,j)
             END DO
          END DO

          CLOSE(3)
       END IF
    END DO

  END SUBROUTINE tecplot_output

END MODULE io

!****************************************************************************************
!****************************************************************************************
! MODULE mpi_wrapper
! Contains subroutines needed for MPI
MODULE mpi_wrapper
  USE types_vars
  USE variables
  USE io
  USE mpi
  IMPLICIT NONE

CONTAINS

  !****************************************************************************************
  ! SUBROUTINE init_parallel_environment
  ! Initializes the MPI environment and sets up the domain decomposition
  SUBROUTINE init_parallel_environment
    IMPLICIT NONE

    CALL MPI_INIT(ierr)                               ! initialize the MPI environment
    CALL MPI_COMM_RANK(MPI_COMM_WORLD, myid, ierr)    ! get the current rank, store in myid
    CALL input                                        ! read the input file (grid sizes are per process!)
    CALL MPI_COMM_SIZE(MPI_COMM_WORLD, nprocs, ierr)  ! get the number of processes, overwriting the value read from input
    CALL create_3d_communicator                       ! set up source/destination ranks for each process
    CALL memalloc                                     ! allocate memory on each process
    CALL MPI_BARRIER(MPI_COMM_WORLD, ierr)            ! wait for every process in this communicator

  END SUBROUTINE init_parallel_environment

  !****************************************************************************************
  ! SUBROUTINE create_3d_communicator
  ! Creates the source and destination for each process to be able to communicate with neighbors
  SUBROUTINE create_3d_communicator
    IMPLICIT NONE

    dims(1) = nprocs_x
    dims(2) = nprocs_y
    dims(3) = nprocs_z
    isperiodic(1) = .FALSE.
    isperiodic(2) = .FALSE.
    isperiodic(3) = .FALSE.
    ! Keep reorder off so ranks in comm3d coincide with MPI_COMM_WORLD ranks;
    ! myid and the comm3d-based rank table cpu_x can then be compared directly
    reorder = .FALSE.
    CALL MPI_CART_CREATE(MPI_COMM_WORLD, 3, dims, isperiodic, reorder, comm3d, ierr)
    CALL MPI_COMM_RANK(comm3d, comm3d_rank, ierr)
    CALL MPI_CART_COORDS(comm3d, comm3d_rank, 3, coords, ierr)
    CALL MPI_CART_SHIFT(comm3d, 0, 1, xsrc, xdst, ierr)
    CALL MPI_CART_SHIFT(comm3d, 1, 1, ysrc, ydst, ierr)
    CALL MPI_CART_SHIFT(comm3d, 2, 1, zsrc, zdst, ierr)
    ! Out-of-grid neighbors come back as MPI_PROC_NULL; normalize them to -1
    ! purely for the diagnostic prints below
    IF(xsrc < 0) xsrc = -1
    IF(ysrc < 0) ysrc = -1
    IF(zsrc < 0) zsrc = -1
    IF(xdst < 0) xdst = -1
    IF(ydst < 0) ydst = -1
    IF(zdst < 0) zdst = -1
    PRINT*, 'myid = ', myid, 'xsrc = ', xsrc, 'xdst = ', xdst
    PRINT*, 'myid = ', myid, 'ysrc = ', ysrc, 'ydst = ', ydst
    PRINT*, 'myid = ', myid, 'zsrc = ', zsrc, 'zdst = ', zdst

  END SUBROUTINE create_3d_communicator

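  ! For example (an illustrative assumption of dims = (2,1,2) on 4 ranks):
  ! MPI_CART_CREATE orders ranks row-major, so rank 0 has coords (0,0,0),
  ! rank 1 -> (0,0,1), rank 2 -> (1,0,0), rank 3 -> (1,0,1). Rank 0 then
  ! sees xdst = 2 and zdst = 1, while its xsrc and zsrc print as -1
  ! (no neighbor on the low side).
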
  !****************************************************************************************
  ! SUBROUTINE fill_buffer
  ! Fill the buffer zones (ghost points) by extrapolating the boundary value;
  ! an inter-process halo exchange is sketched after this subroutine
  SUBROUTINE fill_buffer
    IMPLICIT NONE
    INTEGER :: i, j, k

    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ! Xmin
    DO i = 1,bufx
       u(0-i,0:ny,0:nz)  = u(0,0:ny,0:nz)
    END DO
    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ! Xmax
    DO i = 1,bufx
       u(nx+i,0:ny,0:nz) = u(nx,0:ny,0:nz)
    END DO
    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ! Ymin
    DO j = 1,bufy
       u(0:nx,0-j,0:nz)  = u(0:nx,0,0:nz)
    END DO
    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ! Ymax
    DO j = 1,bufy
       u(0:nx,ny+j,0:nz) = u(0:nx,ny,0:nz)
    END DO
    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ! Zmin
    DO k = 1,bufz
       u(0:nx,0:ny,0-k)  = u(0:nx,0:ny,0)
    END DO
    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ! Zmax
    DO k = 1,bufz
       u(0:nx,0:ny,nz+k) = u(0:nx,0:ny,nz)
    END DO

  END SUBROUTINE fill_buffer

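  !****************************************************************************************
  ! SUBROUTINE exchange_halo_x
  ! A minimal halo-exchange sketch in the x direction, not called from main.
  ! It is an assumption about how the MPI_CART_SHIFT neighbors are meant to be
  ! used eventually: each rank sends its last interior x-plane to its +x
  ! neighbor and receives its low-side ghost plane from its -x neighbor.
  ! Neighbors are re-queried here so out-of-grid ranks stay MPI_PROC_NULL
  ! (which MPI treats as a no-op), independent of the -1 normalization above.
  SUBROUTINE exchange_halo_x
    IMPLICIT NONE
    INTEGER :: src, dst, cnt
    REAL(DP), DIMENSION(0:ny,0:nz) :: sendbuf, recvbuf

    CALL MPI_CART_SHIFT(comm3d, 0, 1, src, dst, ierr)
    cnt = (ny+1)*(nz+1)
    sendbuf = u(nx,0:ny,0:nz)        ! contiguous copy of the plane to send
    CALL MPI_SENDRECV(sendbuf, cnt, MPI_DOUBLE_PRECISION, dst, 1, &
         &            recvbuf, cnt, MPI_DOUBLE_PRECISION, src, 1, &
         &            comm3d, MPI_STATUS_IGNORE, ierr)
    ! Only overwrite the ghost plane if a real neighbor supplied data
    IF (src /= MPI_PROC_NULL) u(-1,0:ny,0:nz) = recvbuf

  END SUBROUTINE exchange_halo_x
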
  !****************************************************************************************
  ! SUBROUTINE close_parallel_environment
  ! Closes the parallel MPI environment
  SUBROUTINE close_parallel_environment
    IMPLICIT NONE

    CALL dealloc
    CALL MPI_FINALIZE(ierr)

  END SUBROUTINE close_parallel_environment

END MODULE mpi_wrapper

!****************************************************************************************
!****************************************************************************************
! MODULE subroutines
! Contains all the miscellaneous subroutines
MODULE subroutines
  USE types_vars
  USE variables
  USE mpi_wrapper
  USE mpi
  IMPLICIT NONE

CONTAINS

  !****************************************************************************************
  ! SUBROUTINE create_output_dir_if_needed
  ! Create an output folder to store solution files
  SUBROUTINE create_output_dir_if_needed
    IMPLICIT NONE
    CHARACTER(len=100) :: dir_name1
    LOGICAL :: dir_exists

    dir_exists = .FALSE.
    dir_name1 = 'solution'
    INQUIRE(file = TRIM(dir_name1)//'/', exist = dir_exists)
    IF (dir_exists) THEN
       PRINT*, 'Output directory exists...'
    ELSE
       PRINT*, 'No output directory exists, creating one...'
       ! EXECUTE_COMMAND_LINE is the standard (Fortran 2008) replacement
       ! for the nonstandard CALL system extension
       CALL EXECUTE_COMMAND_LINE('mkdir '//TRIM(dir_name1))
    END IF

  END SUBROUTINE create_output_dir_if_needed

  !****************************************************************************************
  ! SUBROUTINE grid3d
  ! Creates a 3d Cartesian grid; see the worked example after this subroutine
  SUBROUTINE grid3d
    IMPLICIT NONE
    INTEGER :: i, j, k
    INTEGER :: is, ie, js, je, ks, ke
    REAL(DP) :: lx, ly, lz
    is = 0-bufx; ie = nx+bufx
    js = 0-bufy; je = ny+bufy
    ks = 0-bufz; ke = nz+bufz

    ! Each process covers a slab of length lx (ly, lz) of the global domain
    lx = ABS(xmax-xmin)/nprocs_x
    ly = ABS(ymax-ymin)/nprocs_y
    lz = ABS(zmax-zmin)/nprocs_z
    dx = lx/DBLE(nx)
    dy = ly/DBLE(ny)
    dz = lz/DBLE(nz)

    x(is:ie,js:je,ks:ke) = 0.0d0
    y(is:ie,js:je,ks:ke) = 0.0d0
    z(is:ie,js:je,ks:ke) = 0.0d0
    DO k = ks,ke
       DO j = js,je
          DO i = is,ie
             x(i,j,k) = xmin + DBLE(coords(1))*lx + dx*DBLE(i)
             y(i,j,k) = ymin + DBLE(coords(2))*ly + dy*DBLE(j)
             z(i,j,k) = zmin + DBLE(coords(3))*lz + dz*DBLE(k)
          END DO
       END DO
    END DO

  END SUBROUTINE grid3d

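  ! A worked example of the mapping above, with assumed (not shipped) inputs:
  ! xmin = 0, xmax = 1, nprocs_x = 2, nx = 10 gives lx = 0.5 and dx = 0.05.
  ! The process with coords(1) = 0 places points at x = 0.00, 0.05, ..., 0.50,
  ! and the process with coords(1) = 1 at x = 0.50, 0.55, ..., 1.00, so
  ! neighboring slabs share the interface point x = 0.50 (index nx meets index 0).
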
  !****************************************************************************************
  ! SUBROUTINE init
  ! Initializes the solution
  SUBROUTINE init
    IMPLICIT NONE
    INTEGER :: i, j, k
    INTEGER :: is, ie, js, je, ks, ke
    is = 0-bufx; ie = nx+bufx
    js = 0-bufy; je = ny+bufy
    ks = 0-bufz; ke = nz+bufz

    DO k = ks,ke
       DO j = js,je
          DO i = is,ie
             u(i,j,k) = 2.d0*DCOS(DSIN(z(i,j,k)))*(DCOS(x(i,j,k) + y(i,j,k) + z(i,j,k)) - &
                  &    (1.d0 + x(i,j,k))*DSIN(x(i,j,k) + y(i,j,k) + z(i,j,k)))
          END DO
       END DO
    END DO

    u_init(0:nx,0:ny,0:nz) = u(0:nx,0:ny,0:nz)

  END SUBROUTINE init

  !****************************************************************************************
  ! SUBROUTINE spanAverage
  ! Compute spanwise average
  SUBROUTINE spanAverage
    IMPLICIT NONE
    INTEGER :: i, k
    INTEGER :: sendcnt, recvcnt
    REAL(DP), DIMENSION(0:nx,0:ny) :: u_2d
    REAL(DP), DIMENSION(0:nx,0:ny,0:nprocs_x-1,0:nprocs_z-1) :: u_2d_x_buf
    !~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ! For each process, compute a local spanwise (z) average
    u_2d(0:nx,0:ny) = 0.0d0
    DO k = 0,nz
       u_2d(0:nx,0:ny) = u_2d(0:nx,0:ny) + u(0:nx,0:ny,k)/DBLE(nz+1)
    END DO
    ! Now we need to gather all spanwise averages into one buffer variable
    ! for each streamwise location. First, identify the row of spanwise
    ! processes at each streamwise location; MPI_CART_RANK queries the rank
    ! for Cartesian coordinates (i, 0, k), so no rank-ordering convention is
    ! assumed (the y coordinate is taken as 0, as the original indexing implies)
    DO i = 0,nprocs_x-1
       DO k = 0,nprocs_z-1
          CALL MPI_CART_RANK(comm3d, (/ i, 0, k /), cpu_x(i,k), ierr)
       END DO
    END DO
    ! A full 2d plane of (nx+1)*(ny+1) doubles travels in each message
    sendcnt = (nx+1)*(ny+1)
    recvcnt = sendcnt
    ! Processes off the first spanwise plane (k /= 0; right edge of domain when
    ! looking at the inlet) send their local average to the process at the same
    ! streamwise location on the first plane, which receives one message from
    ! each spanwise neighbor, tagged by that neighbor's k index
    DO i = 0,nprocs_x-1
       DO k = 1,nprocs_z-1
          IF (myid .EQ. cpu_x(i,k)) THEN
             CALL MPI_SEND(u_2d(0,0),sendcnt,MPI_DOUBLE_PRECISION,cpu_x(i,0),k,comm3d,ierr)
          ELSE IF (myid .EQ. cpu_x(i,0)) THEN
             CALL MPI_RECV(u_2d_x_buf(0,0,i,k),recvcnt,MPI_DOUBLE_PRECISION,cpu_x(i,k),k, &
                  &        comm3d,MPI_STATUS_IGNORE,ierr)
          END IF
       END DO
    END DO
    ! For each streamwise location, average over the row of spanwise processes,
    ! including this process's own slice in slot k = 0
    u_2d_x_avg(0:nx,0:ny) = 0.0d0
    DO i = 0,nprocs_x-1
       IF (myid .EQ. cpu_x(i,0)) THEN
          u_2d_x_buf(0:nx,0:ny,i,0) = u_2d(0:nx,0:ny)
          DO k = 0,nprocs_z-1
             u_2d_x_avg(0:nx,0:ny) = u_2d_x_avg(0:nx,0:ny) + u_2d_x_buf(0:nx,0:ny,i,k)/DBLE(nprocs_z)
          END DO
       END IF
    END DO
    ! Each streamwise location now holds its spanwise average; tecplot_output
    ! writes these to separate .dat files for post-processing. An alternative
    ! using collectives is sketched below.

  END SUBROUTINE spanAverage

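  !****************************************************************************************
  ! SUBROUTINE spanAverage_reduce
  ! A minimal alternative sketch, not called from main: instead of hand-rolled
  ! point-to-point messages, split comm3d into one sub-communicator per
  ! spanwise row and let MPI_REDUCE sum the local averages onto the k = 0
  ! rank of each row. The color choice is an assumption (ranks sharing the
  ! same x and y Cartesian coordinates form one row), mirroring spanAverage.
  SUBROUTINE spanAverage_reduce
    IMPLICIT NONE
    INTEGER :: k, color, comm_span, span_rank
    REAL(DP), DIMENSION(0:nx,0:ny) :: u_2d

    ! Local spanwise average on this rank, exactly as in spanAverage
    u_2d = 0.0d0
    DO k = 0,nz
       u_2d = u_2d + u(0:nx,0:ny,k)/DBLE(nz+1)
    END DO
    ! Ranks with the same (x, y) position get the same color and thus land
    ! in the same sub-communicator; the key orders them by z coordinate
    color = coords(1)*nprocs_y + coords(2)
    CALL MPI_COMM_SPLIT(comm3d, color, coords(3), comm_span, ierr)
    CALL MPI_COMM_RANK(comm_span, span_rank, ierr)
    ! Sum the per-rank averages onto rank 0 of each row (coords(3) = 0)
    CALL MPI_REDUCE(u_2d, u_2d_x_avg, (nx+1)*(ny+1), MPI_DOUBLE_PRECISION, &
         &          MPI_SUM, 0, comm_span, ierr)
    IF (span_rank == 0) u_2d_x_avg = u_2d_x_avg/DBLE(nprocs_z)
    CALL MPI_COMM_FREE(comm_span, ierr)

  END SUBROUTINE spanAverage_reduce
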
END MODULE subroutines

!****************************************************************************************
!****************************************************************************************
! PROGRAM main
PROGRAM main

  USE types_vars
  USE variables
  USE io
  USE mpi_wrapper
  USE subroutines
  USE mpi
  IMPLICIT NONE

  CALL init_parallel_environment
  CALL grid3d
  CALL init
  CALL fill_buffer
  IF (myid == 0) CALL create_output_dir_if_needed
  CALL MPI_BARRIER(MPI_COMM_WORLD, ierr)  ! every rank writes below, so wait until the directory exists
  CALL spanAverage
  CALL tecplot_output
  CALL close_parallel_environment

END PROGRAM main