src/comm/HPL_2rinM.c

/*
 * -- High Performance Computing Linpack Benchmark (HPL)
 *    HPL - 2.0 - September 10, 2008
 *    Antoine P. Petitet
 *    University of Tennessee, Knoxville
 *    Innovative Computing Laboratory
 *    (C) Copyright 2000-2008 All Rights Reserved
 *
 * -- Copyright notice and Licensing terms:
 *
 * Redistribution  and  use in  source and binary forms, with or without
 * modification, are  permitted provided  that the following  conditions
 * are met:
 *
 * 1. Redistributions  of  source  code  must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce  the above copyright
 * notice, this list of conditions,  and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * 3. All  advertising  materials  mentioning  features  or  use of this
 * software must display the following acknowledgement:
 * This  product  includes  software  developed  at  the  University  of
 * Tennessee, Knoxville, Innovative Computing Laboratory.
 *
 * 4. The name of the  University,  the name of the  Laboratory,  or the
 * names  of  its  contributors  may  not  be used to endorse or promote
 * products  derived   from   this  software  without  specific  written
 * permission.
 *
 * -- Disclaimer:
 *
 * THIS  SOFTWARE  IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,  INCLUDING,  BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY
 * OR  CONTRIBUTORS  BE  LIABLE FOR ANY  DIRECT,  INDIRECT,  INCIDENTAL,
 * SPECIAL,  EXEMPLARY,  OR  CONSEQUENTIAL DAMAGES  (INCLUDING,  BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA OR PROFITS; OR BUSINESS INTERRUPTION)  HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT,  STRICT LIABILITY,  OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 * ---------------------------------------------------------------------
 */
/*
 * Include files
 */
#include "hpl.h"

#ifdef HPL_NO_MPI_DATATYPE /* The user insists on not using MPI types */
#ifndef HPL_COPY_L       /* and also wants to avoid the copy of L ... */
#define HPL_COPY_L   /* well, sorry, can not do that: force the copy */
#endif
#endif

#ifdef STDC_HEADERS
int HPL_binit_2rinM
(
   HPL_T_panel *              PANEL
)
#else
int HPL_binit_2rinM( PANEL )
   HPL_T_panel *              PANEL;
#endif
{
#ifdef HPL_USE_MPI_DATATYPE
/*
 * .. Local Variables ..
 */
   int                        ierr;
#endif
/* ..
 * .. Executable Statements ..
 */
   if( PANEL == NULL )           { return( HPL_SUCCESS ); }
   if( PANEL->grid->npcol <= 1 ) { return( HPL_SUCCESS ); }
#ifdef HPL_USE_MPI_DATATYPE
#ifdef HPL_COPY_L
/*
 * Copy the panel into a contiguous buffer
 */
   HPL_copyL( PANEL );
#endif
/*
 * Create the MPI user-defined data type
 */
   ierr = HPL_packL( PANEL, 0, PANEL->len, 0 );

   return( ( ierr == MPI_SUCCESS ? HPL_SUCCESS : HPL_FAILURE ) );
#else
/*
 * Force the copy of the panel into a contiguous buffer
 */
   HPL_copyL( PANEL );

   return( HPL_SUCCESS );
#endif
}
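
/*
 * Usage sketch (illustrative only, not part of the original source):
 * the three routines in this file are meant to be driven together.
 * HPL_bcast_2rinM below is probe-based and non-blocking, so a caller
 * would typically poll it, overlapping local computation while the
 * panel is in flight:
 *
 *    int flag;
 *    (void) HPL_binit_2rinM( PANEL );
 *    do
 *    {
 *       (void) HPL_bcast_2rinM( PANEL, &flag );
 *       ( ... overlap local work here while waiting for the panel ... )
 *    }
 *    while( flag == HPL_KEEP_TESTING );
 *    (void) HPL_bwait_2rinM( PANEL );
 */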

#ifdef HPL_USE_MPI_DATATYPE

#define   _M_BUFF     PANEL->buffers[0]
#define   _M_COUNT    PANEL->counts[0]
#define   _M_TYPE     PANEL->dtypes[0]

#else

#define   _M_BUFF     (void *)(PANEL->L2)
#define   _M_COUNT    PANEL->len
#define   _M_TYPE     MPI_DOUBLE

#endif
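
/*
 * _M_BUFF, _M_COUNT and _M_TYPE describe the message payload used by
 * the broadcast below:  with HPL_USE_MPI_DATATYPE they name the packed
 * user-defined data type created by HPL_packL in the initialization
 * phase;  otherwise they name the contiguous buffer PANEL->L2 holding
 * PANEL->len double precision entries.
 */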

#ifdef STDC_HEADERS
int HPL_bcast_2rinM
(
   HPL_T_panel                * PANEL,
   int                        * IFLAG
)
#else
int HPL_bcast_2rinM( PANEL, IFLAG )
   HPL_T_panel                * PANEL;
   int                        * IFLAG;
#endif
{
/*
 * .. Local Variables ..
 */
   MPI_Comm                   comm;
   int                        ierr, go, next, msgid, partner, prev,
                              rank, roo2, root, size;
/* ..
 * .. Executable Statements ..
 */
   if( PANEL == NULL ) { *IFLAG = HPL_SUCCESS; return( HPL_SUCCESS ); }
   if( ( size = PANEL->grid->npcol ) <= 1 )
   {                     *IFLAG = HPL_SUCCESS; return( HPL_SUCCESS ); }
/*
 * Cast phase:  the root process sends to its two right neighbors and
 * to the mid-process.  If I am not the root process, probe for the
 * message.  If the message is there, receive it,  and if I am not the
 * last process of either ring, forward it to the next one.  Otherwise,
 * inform the caller that the panel has not been received yet.
 */
   rank = PANEL->grid->mycol;           comm  = PANEL->grid->row_comm;
   root = PANEL->pcol;                  msgid = PANEL->msgid;
   next = MModAdd1( rank, size );       roo2  = ( ( size + 1 ) >> 1 );
   roo2 = MModAdd(  root, roo2, size );
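
/*
 * Illustrative example (added for exposition, not in the original
 * source):  with size = 6 and root = 0, roo2 = ( ( 6 + 1 ) >> 1 ) = 3,
 * and the panel travels along two increasing rings:
 *
 *    0 -> 1,  0 -> 2       (root feeds its two right neighbors itself)
 *    0 -> 3 -> 4 -> 5      (the mid-process roo2 starts the 2nd ring)
 *
 * The "modified" flavor of the two-ring scheme appears to lie in the
 * root sending directly to both of its right neighbors, so that the
 * process immediately following the root never has to forward.
 */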
   if( rank == root )
   {
      ierr = MPI_Send( _M_BUFF, _M_COUNT, _M_TYPE, next, msgid, comm );

      if( ( ierr == MPI_SUCCESS ) && ( size > 2 ) )
      {
         if( MModAdd1( next, size ) != roo2 )
         {
            ierr = MPI_Send( _M_BUFF, _M_COUNT, _M_TYPE,
                             MModAdd1( next, size ), msgid, comm );
         }

         if( ierr == MPI_SUCCESS )
         {
            ierr = MPI_Send( _M_BUFF, _M_COUNT, _M_TYPE, roo2, msgid,
                             comm );
         }
      }
   }
   else
   {
      prev = MModSub1( rank, size );
      if( ( prev == root ) || ( rank == roo2 ) ||
          ( MModSub1( prev,  size )  == root ) ) partner = root;
      else                                       partner = prev;

      ierr = MPI_Iprobe( partner, msgid, comm, &go, &PANEL->status[0] );

      if( ierr == MPI_SUCCESS )
      {
         if( go != 0 )
         {
            ierr = MPI_Recv( _M_BUFF, _M_COUNT, _M_TYPE, partner, msgid,
                             comm, &PANEL->status[0] );
            if( ( ierr == MPI_SUCCESS ) && ( prev != root ) &&
                ( next != roo2        ) && ( next != root ) )
            {
               ierr = MPI_Send( _M_BUFF, _M_COUNT, _M_TYPE, next, msgid,
                                comm );
            }
         }
         else { *IFLAG = HPL_KEEP_TESTING; return( *IFLAG ); }
      }
   }
/*
 * If the message was received and, when appropriate, forwarded, return
 * HPL_SUCCESS.  If an error occurred in an MPI call, return
 * HPL_FAILURE.
 */
   *IFLAG = ( ierr == MPI_SUCCESS ? HPL_SUCCESS : HPL_FAILURE );

   return( *IFLAG );
}

#ifdef STDC_HEADERS
int HPL_bwait_2rinM
(
   HPL_T_panel *              PANEL
)
#else
int HPL_bwait_2rinM( PANEL )
   HPL_T_panel *              PANEL;
#endif
{
#ifdef HPL_USE_MPI_DATATYPE
/*
 * .. Local Variables ..
 */
   int                        ierr;
#endif
/* ..
 * .. Executable Statements ..
 */
   if( PANEL == NULL )           { return( HPL_SUCCESS ); }
   if( PANEL->grid->npcol <= 1 ) { return( HPL_SUCCESS ); }
/*
 * Release the arrays of request / status / data-types and buffers
 */
#ifdef HPL_USE_MPI_DATATYPE
   ierr = MPI_Type_free( &PANEL->dtypes[0] );

   return( ( ierr == MPI_SUCCESS ? HPL_SUCCESS : HPL_FAILURE ) );
#else
   return( HPL_SUCCESS );
#endif
}