PEXSI
mpi_interf.hpp
/*
   Copyright (c) 2012 The Regents of the University of California,
   through Lawrence Berkeley National Laboratory.

   Author: Lin Lin

   This file is part of PEXSI. All rights reserved.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:

   (1) Redistributions of source code must retain the above copyright notice, this
   list of conditions and the following disclaimer.
   (2) Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.
   (3) Neither the name of the University of California, Lawrence Berkeley
   National Laboratory, U.S. Dept. of Energy nor the names of its contributors may
   be used to endorse or promote products derived from this software without
   specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
   ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
   DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
   ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
   LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
   ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   You are under no obligation whatsoever to provide any bug fixes, patches, or
   upgrades to the features, functionality or performance of the source code
   ("Enhancements") to anyone; however, if you choose to make your Enhancements
   available either publicly, or directly to Lawrence Berkeley National
   Laboratory, without imposing a separate written license agreement for such
   Enhancements, then you hereby grant the following license: a non-exclusive,
   royalty-free perpetual license to install, use, modify, prepare derivative
   works, incorporate into other computer software, distribute, and sublicense
   such enhancements or derivative works thereof, in binary and source code form.
*/
/// @file mpi_interf.hpp
/// @brief Interface with MPI to facilitate communication.
#ifndef _PEXSI_MPI_HPP_
#define _PEXSI_MPI_HPP_

#include "pexsi/environment.hpp"

namespace PEXSI{

/// @namespace mpi
/// @brief Thin wrappers around the MPI collective and point-to-point calls
/// used throughout PEXSI.
namespace mpi{


// *********************************************************************
// Gatherv
//
// NOTE: The interface is quite preliminary.
// *********************************************************************
void Gatherv(
    std::vector<Int>& localVec,
    std::vector<Int>& allVec,
    Int               root,
    MPI_Comm          comm );
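
// Example (illustrative sketch, not part of the library): gather each rank's
// local data onto the root rank, assuming MPI_Init has been called and
// mpirank holds the caller's rank.
//
//   std::vector<Int> localVec( 10, mpirank );   // per-rank contribution
//   std::vector<Int> allVec;                    // filled on the root only
//   PEXSI::mpi::Gatherv( localVec, allVec, 0, MPI_COMM_WORLD );
//   // On rank 0, allVec is the concatenation of every rank's localVec.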


template<typename T>
void
Gatherv (
    std::vector<T>& localVec,
    std::vector<T>& allVec,
    Int             root,
    MPI_Comm        comm )
{
#ifndef _RELEASE_
  PushCallStack("mpi::Gatherv");
#endif
  Int mpirank, mpisize;
  MPI_Comm_rank( comm, &mpirank );
  MPI_Comm_size( comm, &mpisize );

  // The local contribution is measured in bytes so that the data can be moved
  // as MPI_BYTE regardless of the (trivially copyable) element type T.
  Int localSize = localVec.size()*sizeof(T);
  std::vector<Int> localSizeVec( mpisize );
  MPI_Gather( &localSize, 1, MPI_INT, &localSizeVec[0], 1, MPI_INT, root, comm );

  if( mpirank == root ){
    // localSizeVec already holds byte counts, so the displacements are plain
    // prefix sums; no further scaling by sizeof(T) is needed.
    std::vector<Int> localSizeDispls( mpisize );
    localSizeDispls[0] = 0;
    for( Int ip = 1; ip < mpisize; ip++ ){
      localSizeDispls[ip] = localSizeDispls[ip-1] + localSizeVec[ip-1];
    }
    Int totalSize = localSizeDispls[mpisize-1] + localSizeVec[mpisize-1];

    allVec.clear();
    allVec.resize( totalSize / sizeof(T) );

    MPI_Gatherv( &localVec[0], localSize, MPI_BYTE, &allVec[0],
        &localSizeVec[0], &localSizeDispls[0], MPI_BYTE, root, comm );
  }
  else{
    // The receive arguments are ignored on non-root ranks.
    MPI_Gatherv( &localVec[0], localSize, MPI_BYTE, NULL,
        NULL, NULL, MPI_BYTE, root, comm );
  }
#ifndef _RELEASE_
  PopCallStack();
#endif

  return ;
} // ----- end of function Gatherv  -----


void
Gatherv (
    std::vector<Int>& localVec,
    std::vector<Int>& allVec,
    std::vector<Int>& sizes,
    std::vector<Int>& displs,
    Int               root,
    MPI_Comm          comm );


// This variant also returns the per-rank sizes and displacements to the root.
// They are reported in bytes, matching the MPI_BYTE-based MPI_Gatherv call
// below, and are therefore Int vectors regardless of the element type T.
template<typename T>
void
Gatherv (
    std::vector<T>&   localVec,
    std::vector<T>&   allVec,
    std::vector<Int>& sizes,
    std::vector<Int>& displs,
    Int               root,
    MPI_Comm          comm )
{
#ifndef _RELEASE_
  PushCallStack("mpi::Gatherv");
#endif
  Int mpirank, mpisize;
  MPI_Comm_rank( comm, &mpirank );
  MPI_Comm_size( comm, &mpisize );

  Int localSize = localVec.size()*sizeof(T);

  if( mpirank == root ){
    std::vector<Int>& localSizeVec = sizes;
    localSizeVec.resize( mpisize );
    MPI_Gather( &localSize, 1, MPI_INT, &localSizeVec[0], 1, MPI_INT, root, comm );
    std::vector<Int>& localSizeDispls = displs;
    localSizeDispls.resize( mpisize );
    // localSizeVec holds byte counts, so the displacements are prefix sums.
    localSizeDispls[0] = 0;
    for( Int ip = 1; ip < mpisize; ip++ ){
      localSizeDispls[ip] = localSizeDispls[ip-1] + localSizeVec[ip-1];
    }
    Int totalSize = localSizeDispls[mpisize-1] + localSizeVec[mpisize-1];

    allVec.clear();
    allVec.resize( totalSize / sizeof(T) );

    MPI_Gatherv( &localVec[0], localSize, MPI_BYTE, &allVec[0],
        &localSizeVec[0], &localSizeDispls[0], MPI_BYTE, root, comm );
  }
  else{
    MPI_Gather( &localSize, 1, MPI_INT, NULL, 1, MPI_INT, root, comm );
    MPI_Gatherv( &localVec[0], localSize, MPI_BYTE, NULL,
        NULL, NULL, MPI_BYTE, root, comm );
  }
#ifndef _RELEASE_
  PopCallStack();
#endif

  return ;
} // ----- end of function Gatherv  -----
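
// Example (illustrative sketch, not part of the library): gather doubles onto
// the root and keep the per-rank sizes and displacements for later
// bookkeeping.  As implemented above, sizes and displs come back in bytes.
//
//   std::vector<double> localVec( 5, 1.0 );
//   std::vector<double> allVec;
//   std::vector<Int>    sizes, displs;   // filled on the root only
//   PEXSI::mpi::Gatherv( localVec, allVec, sizes, displs, 0, MPI_COMM_WORLD );
//   // On rank 0: sizes[p]  = bytes contributed by rank p,
//   //            displs[p] = byte offset of rank p's data inside allVec.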




// *********************************************************************
// Allgatherv
//
// NOTE: The interface is quite preliminary.
// *********************************************************************
void Allgatherv(
    std::vector<Int>& localVec,
    std::vector<Int>& allVec,
    MPI_Comm          comm );




template <typename T>
void
Allgatherv (
    std::vector<T>& localVec,
    std::vector<T>& allVec,
    MPI_Comm        comm )
{
#ifndef _RELEASE_
  PushCallStack("mpi::Allgatherv");
#endif
  Int mpirank, mpisize;
  MPI_Comm_rank( comm, &mpirank );
  MPI_Comm_size( comm, &mpisize );

  Int localSize = localVec.size()*sizeof(T);
  std::vector<Int> localSizeVec( mpisize );
  std::vector<Int> localSizeDispls( mpisize );
  MPI_Allgather( &localSize, 1, MPI_INT, &localSizeVec[0], 1, MPI_INT, comm );
  // localSizeVec already holds byte counts, so the displacements are plain
  // prefix sums; no further scaling by sizeof(T) is needed.
  localSizeDispls[0] = 0;
  for( Int ip = 1; ip < mpisize; ip++ ){
    localSizeDispls[ip] = localSizeDispls[ip-1] + localSizeVec[ip-1];
  }
  Int totalSize = localSizeDispls[mpisize-1] + localSizeVec[mpisize-1];

  allVec.clear();
  allVec.resize( totalSize / sizeof(T) );

  MPI_Allgatherv( &localVec[0], localSize, MPI_BYTE, &allVec[0],
      &localSizeVec[0], &localSizeDispls[0], MPI_BYTE, comm );

#ifndef _RELEASE_
  PopCallStack();
#endif

  return ;
} // ----- end of function Allgatherv  -----
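
// Example (illustrative sketch, not part of the library): every rank receives
// the concatenation of all ranks' local vectors, assuming mpirank holds the
// caller's rank.
//
//   std::vector<Int> localVec( mpirank + 1, mpirank );  // ranks contribute
//   std::vector<Int> allVec;                            // different amounts
//   PEXSI::mpi::Allgatherv( localVec, allVec, MPI_COMM_WORLD );
//   // allVec is identical on every rank after the call.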




template <typename T>
void
Bcast (
    std::vector<T>& dataVec,
    Int             root,
    MPI_Comm        comm )
{
#ifndef _RELEASE_
  PushCallStack("mpi::Bcast");
#endif
  Int mpirank, mpisize;
  MPI_Comm_rank( comm, &mpirank );
  MPI_Comm_size( comm, &mpisize );

  // Broadcast the element count first so that non-root ranks can allocate,
  // then broadcast the raw bytes of the data.
  Int localSize = dataVec.size();
  MPI_Bcast( &localSize, sizeof(localSize), MPI_BYTE, root, comm );

  if( mpirank != root ){
    dataVec.clear();
    dataVec.resize( localSize );
  }

  MPI_Bcast( &dataVec[0], localSize*sizeof(T), MPI_BYTE, root, comm );

#ifndef _RELEASE_
  PopCallStack();
#endif

  return ;
} // ----- end of function Bcast  -----
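
// Example (illustrative sketch, not part of the library): broadcast a vector
// that only the root has filled; the other ranks pass an empty vector and the
// call resizes and fills it.  Assumes mpirank holds the caller's rank.
//
//   std::vector<Real> dataVec;
//   if( mpirank == 0 ) dataVec.assign( 100, 3.14 );
//   PEXSI::mpi::Bcast( dataVec, 0, MPI_COMM_WORLD );
//   // dataVec now has 100 entries on every rank.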




// *********************************************************************
// Send / Recv for stringstream
//
// Isend / Irecv are not provided here because the size and the content
// have to be communicated separately for non-blocking communication.
// *********************************************************************

void Send( std::stringstream& sstm, Int dest, Int tagSize, Int tagContent,
    MPI_Comm comm );

void Recv ( std::stringstream& sstm, Int src, Int tagSize, Int tagContent,
    MPI_Comm comm, MPI_Status& statSize, MPI_Status& statContent );

void Recv ( std::stringstream& sstm, Int src, Int tagSize, Int tagContent,
    MPI_Comm comm );
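
// Example (illustrative sketch, not part of the library): ship a serialized
// payload from rank 0 to rank 1.  Each Send is matched by a Recv with the
// same pair of tags, one for the size message and one for the content; the
// tag values 7 and 8 below are arbitrary.
//
//   std::stringstream sstm;
//   if( mpirank == 0 ){
//     sstm << "some serialized payload";
//     PEXSI::mpi::Send( sstm, 1, 7, 8, MPI_COMM_WORLD );
//   }
//   else if( mpirank == 1 ){
//     PEXSI::mpi::Recv( sstm, 0, 7, 8, MPI_COMM_WORLD );
//     std::string payload = sstm.str();
//   }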

// *********************************************************************
// Waitall
// *********************************************************************

void
Wait ( MPI_Request& req );

void
Waitall ( std::vector<MPI_Request>& reqs, std::vector<MPI_Status>& stats );

void
Waitall ( std::vector<MPI_Request>& reqs );
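
// Example (illustrative sketch, not part of the library): complete a batch of
// non-blocking receives posted with plain MPI calls.  nmsg, recvBuf,
// recvCount, srcRank and tag are placeholders for the caller's own data.
//
//   std::vector<MPI_Request> reqs( nmsg );
//   std::vector<MPI_Status>  stats( nmsg );
//   for( Int i = 0; i < nmsg; i++ ){
//     MPI_Irecv( recvBuf[i], recvCount[i], MPI_DOUBLE, srcRank[i], tag,
//         MPI_COMM_WORLD, &reqs[i] );
//   }
//   PEXSI::mpi::Waitall( reqs, stats );   // blocks until all requests finish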

// *********************************************************************
// Reduce
// *********************************************************************

void
Reduce ( Real* sendbuf, Real* recvbuf, Int count, MPI_Op op, Int root, MPI_Comm comm );

void
Reduce ( Complex* sendbuf, Complex* recvbuf, Int count, MPI_Op op, Int root, MPI_Comm comm );


#ifdef _USE_MPI3_
void
Ireduce ( Real* sendbuf, Real* recvbuf, Int count, MPI_Op op, Int root, MPI_Comm comm, MPI_Request& request );

void
Ireduce ( Complex* sendbuf, Complex* recvbuf, Int count, MPI_Op op, Int root, MPI_Comm comm, MPI_Request& request );
#endif

void
Allreduce ( Int* sendbuf, Int* recvbuf, Int count, MPI_Op op, MPI_Comm comm );

void
Allreduce ( Real* sendbuf, Real* recvbuf, Int count, MPI_Op op, MPI_Comm comm );

void
Allreduce ( Complex* sendbuf, Complex* recvbuf, Int count, MPI_Op op, MPI_Comm comm );
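
// Example (illustrative sketch, not part of the library): sum a small array
// of Real values over all ranks.
//
//   std::vector<Real> local( 3, 1.0 ), global( 3, 0.0 );
//   PEXSI::mpi::Allreduce( &local[0], &global[0], 3, MPI_SUM, MPI_COMM_WORLD );
//   // Every entry of global equals the number of ranks in the communicator.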

// *********************************************************************
// Alltoall
// *********************************************************************

void
Alltoallv ( Int *bufSend, Int *sizeSend, Int *displsSend,
    Int *bufRecv, Int *sizeRecv,
    Int *displsRecv, MPI_Comm comm );

void
Alltoallv ( Real *bufSend, Int *sizeSend, Int *displsSend,
    Real *bufRecv, Int *sizeRecv,
    Int *displsRecv, MPI_Comm comm );

void
Alltoallv ( Complex *bufSend, Int *sizeSend, Int *displsSend,
    Complex *bufRecv, Int *sizeRecv,
    Int *displsRecv, MPI_Comm comm );
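
// Example (illustrative sketch, not part of the library): each rank sends one
// Int to every rank, the degenerate case in which Alltoallv reduces to
// Alltoall.  This assumes the wrapper follows the usual MPI_Alltoallv
// convention of per-rank element counts and offsets.
//
//   std::vector<Int> bufSend( mpisize, mpirank ), bufRecv( mpisize );
//   std::vector<Int> sizeSend( mpisize, 1 ), sizeRecv( mpisize, 1 );
//   std::vector<Int> displsSend( mpisize ), displsRecv( mpisize );
//   for( Int p = 0; p < mpisize; p++ ){ displsSend[p] = p; displsRecv[p] = p; }
//   PEXSI::mpi::Alltoallv( &bufSend[0], &sizeSend[0], &displsSend[0],
//       &bufRecv[0], &sizeRecv[0], &displsRecv[0], MPI_COMM_WORLD );
//   // bufRecv[p] == p on every rank.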

} // namespace mpi

} // namespace PEXSI



#endif // _PEXSI_MPI_HPP_