!
!   Example 5
!
!   Interface:    Linear-Algebraic (IJ), Babel-based version in Fortran90/9x
!
!   Compile with: make ex5b90
!
!   Sample run:   mpirun -np 4 ex5b90
!
!   Description:  This example solves the 2-D
!                 Laplacian problem with zero boundary conditions
!                 on an nxn grid.  The number of unknowns is N=n^2.
!                 The standard 5-point stencil is used, and we solve
!                 for the interior nodes only.
!
!                 This example solves the same problem as Example 3.
!                 Available solvers are AMG, PCG, and PCG with AMG or
!                 ParaSails preconditioners.

! HOW TO BUILD:
! Fortran 90 is not yet fully supported by the hypre build system, so this
! is a little more complicated than C or Fortran 77.  These instructions have
! only been tried in one environment so far.
! 1. Make sure you have a Fortran 90 compiler!
! 2. Make sure your MPI library supports Fortran 90; this is generally the
!    case if it supplies an mpi.mod file and an mpif90 compiler wrapper.
! 3. Set the environment variable FC to your mpif90, the MPI wrapper for your
!    Fortran 90 compiler.
! 4. Install Chasm, which Babel requires for handling Fortran arrays.  See
!    the Babel users manual for more information.
! 5. Set the environment variable CHASMPREFIX to the same value as the
!    --prefix option you used when configuring Chasm.
! 6. Run hypre's 'configure --with-babel ...' and make.
! 7. cd babel/bHYPREClient-F90; make
! 8. cd examples; make ex5b90
! (An example session is sketched below.)
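!
! For example, a session might look like the following.  This is a sketch
! only: the shell syntax (sh/bash), install path, and directory layout are
! hypothetical and will differ between machines.
!
!   export FC=mpif90
!   export CHASMPREFIX=/usr/local/chasm   # wherever Chasm was installed
!   ./configure --with-babel
!   make
!   cd babel/bHYPREClient-F90 && make
!   cd ../../examples && make ex5b90
!   mpirun -np 4 ex5b90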


program ex5b90

  use mpi
!  ... requires mpi.mod, which is not available (I think) in the old public
!  mpich on my machine, but which works with my privately installed, newer
!  OpenMPI. (JfP)
!  If a real Fortran 90 mpi.mod isn't available, the only alternative is to
!  include the Fortran 77 header instead, with corresponding changes thereafter:
!#include "mpif.h"

  use sidl_SIDLException
  use bHYPRE_BoomerAMG
  use bHYPRE_MPICommunicator
  use bHYPRE_IJParCSRMatrix
  use bHYPRE_IJParCSRVector

  integer, parameter::  MAX_LOCAL_SIZE = 123000

  integer    ierr, ierrtmp
  integer    num_procs, myid
  integer    local_size, extra
  integer    n, solver_id, print_solution, ng
  integer    nnz, ilower, iupper, i
  real(8)    h, h2
  real(8)    rhs_values(MAX_LOCAL_SIZE)
  real(8)    x_values(MAX_LOCAL_SIZE)
  integer    rows(MAX_LOCAL_SIZE)
  integer    cols(5)
  real(8)    values(5)
  integer    num_iterations
  real(8)    final_res_norm, tol
  integer(8) mpi_comm

  type(bHYPRE_MPICommunicator_t) bHYPRE_mpicomm
  type(bHYPRE_IJParCSRMatrix_t)  parcsr_A
  type(bHYPRE_Operator_t)        op_A
  type(bHYPRE_IJParCSRVector_t)  par_b
  type(bHYPRE_IJParCSRVector_t)  par_x
  type(bHYPRE_Vector_t)          vec_b
  type(bHYPRE_Vector_t)          vec_x
  type(bHYPRE_BoomerAMG_t)       amg_solver
  type(sidl_SIDLException_t)     except
  !     ... except is for Babel exceptions, which we shall ignore

!-----------------------------------------------------------------------
!     Initialize MPI
!-----------------------------------------------------------------------

      call MPI_INIT(ierr)
      call MPI_COMM_RANK(MPI_COMM_WORLD, myid, ierr)
      call MPI_COMM_SIZE(MPI_COMM_WORLD, num_procs, ierr)
      mpi_comm = MPI_COMM_WORLD
      call bHYPRE_MPICommunicator_CreateF_f( mpi_comm, bHYPRE_mpicomm, except )
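!     The call above wraps the Fortran MPI communicator in a Babel object
!     for the bHYPRE classes.  (mpi_comm is declared integer(8), presumably
!     so it is wide enough for the binding's opaque argument; the
!     default-integer MPI_COMM_WORLD is assigned into it first.)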

!   Default problem parameters
      n = 33
      solver_id = 0
      print_solution = 0
      tol = 1.0d-7

!   Command-line input is not implemented yet.

!   Preliminaries: want at least one processor per row
      if ( n*n .lt. num_procs) then
         n = int(sqrt(real(num_procs))) + 1
      endif
!     ng = global number of rows, h = mesh size
      ng = n*n
      h = 1.0d0/(n+1)
      h2 = h*h

!     Each processor knows only of its own rows - the range is denoted by
!     ilower and iupper.  Here we partition the rows.  We account for the
!     fact that N may not divide evenly by the number of processors.
      local_size = ng/num_procs
      extra = ng - local_size*num_procs

      ilower = local_size*myid
      ilower = ilower + min(myid, extra)

      iupper = local_size*(myid+1)
      iupper = iupper + min(myid+1, extra)
      iupper = iupper - 1

!     How many rows do I have?
      local_size = iupper - ilower + 1
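!     For example, with ng = 9 rows on num_procs = 2 processors:
!     local_size = 9/2 = 4 and extra = 1, so rank 0 owns rows 0..4
!     (5 rows) and rank 1 owns rows 5..8 (4 rows).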

!     Create the matrix.
!     Note that this is a square matrix, so we indicate the row partition
!     size twice (since number of rows = number of cols)
      call bHYPRE_IJParCSRMatrix_Create_f( bHYPRE_mpicomm, ilower, iupper, ilower, iupper, &
           parcsr_A, except )

!     op_A will be needed later as a function argument
      call bHYPRE_Operator__cast_f( parcsr_A, op_A, except )
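!     (The cast is a Babel-style upcast: op_A is a base-class handle to the
!     same underlying object and holds its own reference, which is why the
!     cleanup code below calls deleteRef on both op_A and parcsr_A.)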

!     Choose a parallel csr format storage (see the User's Manual)
!     Note: here the HYPRE interface requires a SetObjectType call, but the
!     bHYPRE interface does not, because the object type is already
!     specified through the class name.

!     Initialize before setting coefficients
      call bHYPRE_IJParCSRMatrix_Initialize_f( parcsr_A, ierrtmp, except )

!     Now go through my local rows and set the matrix entries.
!     Each row has at most 5 entries. For example, if n=3:
!
!      A = [M -I 0; -I M -I; 0 -I M]
!      M = [4 -1 0; -1 4 -1; 0 -1 4]
!
!     Note that here we are setting one row at a time, though
!     one could set all the rows together (see the User's Manual).
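!     Concretely, for n=3 the interior row i=4 (the center of the grid)
!     gets entries at columns {1, 3, 4, 5, 7} with values
!     {-1, -1, 4, -1, -1}; boundary rows get fewer off-diagonals.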

      do i = ilower, iupper
         nnz = 1

!        The left identity block: position i-n
         if ( (i-n) .ge. 0 ) then
            cols(nnz) = i-n
            values(nnz) = -1.0d0
            nnz = nnz + 1
         endif

!        The left -1: position i-1
         if ( mod(i,n).ne.0 ) then
            cols(nnz) = i-1
            values(nnz) = -1.0d0
            nnz = nnz + 1
         endif

!        Set the diagonal: position i
         cols(nnz) = i
         values(nnz) = 4.0d0
         nnz = nnz + 1

!        The right -1: position i+1
         if ( mod((i+1),n) .ne. 0 ) then
            cols(nnz) = i+1
            values(nnz) = -1.0d0
            nnz = nnz + 1
         endif

!        The right identity block: position i+n
         if ((i+n) .lt. ng ) then
            cols(nnz) = i+n
            values(nnz) = -1.0d0
            nnz = nnz + 1
         endif

!        Set the values for row i
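!        (Here: one row, nnz-1 entries, the global row index i, the column
!        indices and values, and - presumably for Babel's array descriptor -
!        the declared length of the cols/values arrays, 5.)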
         call bHYPRE_IJParCSRMatrix_SetValues_f( parcsr_A, 1, nnz-1, i, cols, values, 5, &
              ierrtmp, except )

      enddo

!     Assemble after setting the coefficients
      call bHYPRE_IJParCSRMatrix_Assemble_f( parcsr_A, ierrtmp, except )

!     Create the rhs and solution
      call bHYPRE_IJParCSRVector_Create_f( bHYPRE_mpicomm, ilower, iupper, par_b, except )
!     vec_b will be needed later for function arguments
      call bHYPRE_Vector__cast_f( par_b, vec_b, except )

      call bHYPRE_IJParCSRVector_Initialize_f( par_b, ierrtmp, except )

      call bHYPRE_IJParCSRVector_Create_f( bHYPRE_mpicomm, ilower, iupper, par_x, except )
!     vec_x will be needed later for function arguments
      call bHYPRE_Vector__cast_f( par_x, vec_x, except )

      call bHYPRE_IJParCSRVector_Initialize_f( par_x, ierrtmp, except )

!     Set the rhs values to h^2 and the solution to zero
      do i = 1, local_size
         rhs_values(i) = h2
         x_values(i) = 0.0d0
         rows(i) = ilower + i - 1
      enddo
      call bHYPRE_IJParCSRVector_SetValues_f( par_b, local_size, rows, rhs_values, &
           ierrtmp, except )
      call bHYPRE_IJParCSRVector_SetValues_f( par_x, local_size, rows, x_values, ierrtmp, except )


      call bHYPRE_IJParCSRVector_Assemble_f( par_b, ierrtmp, except )
      call bHYPRE_IJParCSRVector_Assemble_f( par_x, ierrtmp, except )

!     Choose a solver and solve the system

!     AMG
      if ( solver_id == 0 ) then

!        Create solver
         call bHYPRE_BoomerAMG_Create_f( bHYPRE_mpicomm, parcsr_A, amg_solver, except )

!        Set some parameters (see the Reference Manual for more parameters)
!        PrintLevel=3 means print solve info + parameters
!        CoarsenType=6 means Falgout coarsening
!        RelaxType=3 means Gauss-Seidel/Jacobi hybrid relaxation
         call bHYPRE_BoomerAMG_SetIntParameter_f( amg_solver, "PrintLevel", 3, ierrtmp, except )
         call bHYPRE_BoomerAMG_SetIntParameter_f( amg_solver, "CoarsenType", 6, ierrtmp, except )
         call bHYPRE_BoomerAMG_SetIntParameter_f( amg_solver, "RelaxType", 3, ierrtmp, except )
         call bHYPRE_BoomerAMG_SetIntParameter_f( amg_solver, "NumSweeps", 1, ierrtmp, except )
         call bHYPRE_BoomerAMG_SetIntParameter_f( amg_solver, "MaxLevels", 20, ierrtmp, except )
         call bHYPRE_BoomerAMG_SetDoubleParameter_f( amg_solver, "Tolerance", tol, ierrtmp, &
              except )

!        Now setup and solve!
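!        Setup builds the AMG grid hierarchy and operators; Apply then runs
!        the solve, with vec_b as the right-hand side and vec_x holding the
!        initial guess on entry and the solution on return.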
         call bHYPRE_BoomerAMG_Setup_f( amg_solver, vec_b, vec_x, ierrtmp, except )
         call bHYPRE_BoomerAMG_Apply_f( amg_solver, vec_b, vec_x, ierrtmp, except )

!        Run info - requires logging to be turned on
         call bHYPRE_BoomerAMG_GetIntValue_f( amg_solver, "NumIterations", num_iterations, &
              ierrtmp, except )
         ierr = ierr + ierrtmp
         call bHYPRE_BoomerAMG_GetDoubleValue_f( amg_solver, "RelResidualNorm", final_res_norm, &
              ierrtmp, except )

         if (myid .eq. 0) then
            print *
            print *, "Iterations = ", num_iterations
            print *, "Final Relative Residual Norm = ", final_res_norm
            print *
         endif

!        Destroy solver
         call bHYPRE_BoomerAMG_deleteRef_f( amg_solver, except )

      endif

!     Calls to the other solvers are not implemented yet.


!     Print the solution
      if ( print_solution .ne. 0 ) then
         call bHYPRE_IJParCSRVector_Print_f( par_x, "ij.out.x", except )
      endif

!     Clean up
      call bHYPRE_Operator_deleteRef_f( op_A, except )
      call bHYPRE_Vector_deleteRef_f( vec_b, except )
      call bHYPRE_Vector_deleteRef_f( vec_x, except )
      call bHYPRE_IJParCSRMatrix_deleteRef_f( parcsr_A, except )
      call bHYPRE_IJParCSRVector_deleteRef_f( par_b, except )
      call bHYPRE_IJParCSRVector_deleteRef_f( par_x, except )
      call bHYPRE_MPICommunicator_deleteRef_f( bHYPRE_mpicomm, except )

!     We need a multi-language equivalent of hypre_assert.
      if ( ierr .ne. 0 ) then
         print *
         print *, "***** Bad ierr = ", ierr
         print *
      endif

!     Finalize MPI
      call MPI_Finalize(ierrtmp)

      stop
      end program ex5b90

