src/grid/HPL_grid_init.c
/*
 * -- High Performance Computing Linpack Benchmark (HPL)
 *    HPL - 2.0 - September 10, 2008
 *    Antoine P. Petitet
 *    University of Tennessee, Knoxville
 *    Innovative Computing Laboratory
 *    (C) Copyright 2000-2008 All Rights Reserved
 *
 * -- Copyright notice and Licensing terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions, and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgement:
 *    This product includes software developed at the University of
 *    Tennessee, Knoxville, Innovative Computing Laboratory.
 *
 * 4. The name of the University, the name of the Laboratory, or the
 *    names of its contributors may not be used to endorse or promote
 *    products derived from this software without specific written
 *    permission.
 *
 * -- Disclaimer:
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * ---------------------------------------------------------------------
 */
/*
 * Include files
 */
#include "hpl.h"

#ifdef STDC_HEADERS
int HPL_grid_init
(
   MPI_Comm                         COMM,
   const HPL_T_ORDER                ORDER,
   const int                        NPROW,
   const int                        NPCOL,
   HPL_T_grid *                     GRID
)
#else
int HPL_grid_init
( COMM, ORDER, NPROW, NPCOL, GRID )
   MPI_Comm                         COMM;
   const HPL_T_ORDER                ORDER;
   const int                        NPROW;
   const int                        NPCOL;
   HPL_T_grid *                     GRID;
#endif
{
/*
 * Purpose
 * =======
 *
 * HPL_grid_init creates a NPROW x NPCOL process grid using column- or
 * row-major ordering from an initial collection of processes identified
 * by an MPI communicator. Successful completion is indicated by the
 * returned error code MPI_SUCCESS. Other error codes depend on the MPI
 * implementation. The coordinates of processes that are not part of the
 * grid are set to values outside of [0..NPROW) x [0..NPCOL).
 *
 * Arguments
 * =========
 *
 * COMM    (global/local input)          MPI_Comm
 *         On entry, COMM is the MPI communicator identifying the
 *         initial collection of processes out of which the grid is
 *         formed.
 *
 * ORDER   (global input)                const HPL_T_ORDER
 *         On entry, ORDER specifies how the processes should be ordered
 *         in the grid as follows:
 *            ORDER = HPL_ROW_MAJOR      row-major    ordering;
 *            ORDER = HPL_COLUMN_MAJOR   column-major ordering;
 *
 * NPROW   (global input)                const int
 *         On entry, NPROW specifies the number of process rows in the
 *         grid to be created. NPROW must be at least one.
 *
 * NPCOL   (global input)                const int
 *         On entry, NPCOL specifies the number of process columns in
 *         the grid to be created. NPCOL must be at least one.
 *
 * GRID    (local input/output)          HPL_T_grid *
 *         On entry, GRID points to the data structure containing the
 *         process grid information to be initialized.
 *
 * ---------------------------------------------------------------------
 */
/*
 * .. Local Variables ..
 */
   int                        hdim, hplerr=MPI_SUCCESS, ierr, ip2, k,
                              mask, mycol, myrow, nprocs, rank, size;
/* ..
 * .. Executable Statements ..
 */
   MPI_Comm_rank( COMM, &rank ); MPI_Comm_size( COMM, &size );
/*
 * Abort if illegal process grid
 */
   nprocs = NPROW * NPCOL;
   if( ( nprocs > size ) || ( NPROW < 1 ) || ( NPCOL < 1 ) )
   { HPL_pabort( __LINE__, "HPL_grid_init", "Illegal Grid" ); }
/*
 * Row- or column-major ordering of the processes
 */
   if( ORDER == HPL_ROW_MAJOR )
   {
      GRID->order = HPL_ROW_MAJOR;
      myrow = rank / NPCOL; mycol = rank - myrow * NPCOL;
   }
   else
   {
      GRID->order = HPL_COLUMN_MAJOR;
      mycol = rank / NPROW; myrow = rank - mycol * NPROW;
   }
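/*
 * Illustrative note (not in the original source): with NPROW = 2, NPCOL = 3
 * and row-major ordering, rank 4 maps to myrow = 4 / 3 = 1 and
 * mycol = 4 - 1 * 3 = 1; with column-major ordering the same rank maps to
 * mycol = 4 / 2 = 2 and myrow = 4 - 2 * 2 = 0.
 */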
   GRID->iam   = rank;  GRID->myrow = myrow; GRID->mycol  = mycol;
   GRID->nprow = NPROW; GRID->npcol = NPCOL; GRID->nprocs = nprocs;
/*
 * row_ip2   : largest power of two <= nprow;
 * row_hdim  : row_ip2 procs hypercube dim;
 * row_ip2m1 : largest power of two <= nprow-1;
 * row_mask  : row_ip2m1 procs hypercube mask;
 */
   hdim = 0; ip2 = 1; k = NPROW;
   while( k > 1 ) { k >>= 1; ip2 <<= 1; hdim++; }
   GRID->row_ip2 = ip2; GRID->row_hdim = hdim;

   mask = ip2 = 1; k = NPROW - 1;
   while( k > 1 ) { k >>= 1; ip2 <<= 1; mask <<= 1; mask++; }
   GRID->row_ip2m1 = ip2; GRID->row_mask = mask;
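/*
 * Illustrative note (not in the original source): for NPROW = 6 the first
 * loop above leaves ip2 = 4 and hdim = 2, since 4 = 2^2 is the largest
 * power of two not exceeding 6; the second loop, run on NPROW - 1 = 5,
 * leaves row_ip2m1 = 4 and row_mask = 7 (binary 111).
 */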
/*
 * col_ip2   : largest power of two <= npcol;
 * col_hdim  : col_ip2 procs hypercube dim;
 * col_ip2m1 : largest power of two <= npcol-1;
 * col_mask  : col_ip2m1 procs hypercube mask;
 */
   hdim = 0; ip2 = 1; k = NPCOL;
   while( k > 1 ) { k >>= 1; ip2 <<= 1; hdim++; }
   GRID->col_ip2 = ip2; GRID->col_hdim = hdim;

   mask = ip2 = 1; k = NPCOL - 1;
   while( k > 1 ) { k >>= 1; ip2 <<= 1; mask <<= 1; mask++; }
   GRID->col_ip2m1 = ip2; GRID->col_mask = mask;
/*
 * All communicator, leave if I am not part of this grid. Creation of the
 * row- and column communicators.
 */
   ierr = MPI_Comm_split( COMM, ( rank < nprocs ? 0 : MPI_UNDEFINED ),
                          rank, &(GRID->all_comm) );
   if( GRID->all_comm == MPI_COMM_NULL ) return( ierr );
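/*
 * Note (added): processes with rank >= nprocs pass MPI_UNDEFINED as the
 * color to MPI_Comm_split, so they receive MPI_COMM_NULL for all_comm and
 * leave here without taking part in the row and column splits below.
 */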

   ierr = MPI_Comm_split( GRID->all_comm, myrow, mycol, &(GRID->row_comm) );
   if( ierr != MPI_SUCCESS ) hplerr = ierr;

   ierr = MPI_Comm_split( GRID->all_comm, mycol, myrow, &(GRID->col_comm) );
   if( ierr != MPI_SUCCESS ) hplerr = ierr;

   return( hplerr );
/*
 * End of HPL_grid_init
 */
}
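
/*
 * Illustrative usage sketch (not part of the original HPL source): a minimal
 * driver showing how HPL_grid_init might be called. It relies only on the
 * grid fields assigned above (iam, myrow, mycol, nprow, npcol, all_comm);
 * the guard macro HPL_GRID_INIT_USAGE_EXAMPLE is made up for this sketch so
 * that the file compiles exactly as before when the macro is not defined.
 */
#ifdef HPL_GRID_INIT_USAGE_EXAMPLE
#include <stdio.h>

int main( int argc, char * argv[] )
{
   HPL_T_grid                 grid;
   int                        ierr;

   MPI_Init( &argc, &argv );
/*
 * Build a 2 x 3 row-major process grid out of MPI_COMM_WORLD; run with at
 * least 6 MPI processes.
 */
   ierr = HPL_grid_init( MPI_COMM_WORLD, HPL_ROW_MAJOR, 2, 3, &grid );
/*
 * Processes outside the 2 x 3 grid come back with all_comm == MPI_COMM_NULL
 * and simply skip the report below.
 */
   if( ( ierr == MPI_SUCCESS ) && ( grid.all_comm != MPI_COMM_NULL ) )
   {
      (void) printf( "process %d sits at (%d,%d) of a %d x %d grid\n",
                     grid.iam, grid.myrow, grid.mycol,
                     grid.nprow, grid.npcol );
   }
   MPI_Finalize();
   return( 0 );
}
#endif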