/*
   Copyright (c) 2012 The Regents of the University of California,
   through Lawrence Berkeley National Laboratory.

Author: Lin Lin

This file is part of PEXSI. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

(1) Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
(2) Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
(3) Neither the name of the University of California, Lawrence Berkeley
National Laboratory, U.S. Dept. of Energy nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

You are under no obligation whatsoever to provide any bug fixes, patches, or
upgrades to the features, functionality or performance of the source code
("Enhancements") to anyone; however, if you choose to make your Enhancements
available either publicly, or directly to Lawrence Berkeley National
Laboratory, without imposing a separate written license agreement for such
Enhancements, then you hereby grant the following license: a non-exclusive,
royalty-free perpetual license to install, use, modify, prepare derivative
works, incorporate into other computer software, distribute, and sublicense
such enhancements or derivative works thereof, in binary and source code form.
*/
/// @file mpi_interf.hpp
/// @brief Interface with MPI to facilitate communication.
#ifndef _PEXSI_MPI_HPP_
#define _PEXSI_MPI_HPP_

#include "pexsi/environment.hpp"

namespace PEXSI{

/// @namespace mpi
///
/// @brief Interface with MPI to facilitate communication.
namespace mpi{

// *********************************************************************
// Gatherv
//
// NOTE: The interface is quite preliminary.
// *********************************************************************
void Gatherv(
    std::vector<Int>& localVec,
    std::vector<Int>& allVec,
    Int root,
    MPI_Comm comm );

template<typename T>
void
Gatherv (
    std::vector<T>& localVec,
    std::vector<T>& allVec,
    Int root,
    MPI_Comm comm )
{
  Int mpirank, mpisize;
  MPI_Comm_rank( comm, &mpirank );
  MPI_Comm_size( comm, &mpisize );

  // Gather the per-rank byte counts onto the root.
  Int localSize = localVec.size()*sizeof(T);
  std::vector<Int> localSizeVec( mpisize );
  MPI_Gather( &localSize, 1, MPI_INT, &localSizeVec[0], 1, MPI_INT, root, comm );

  if(mpirank==root){
    // localSizeVec already holds byte counts, so the displacements are a
    // plain prefix sum; multiplying by sizeof(T) again would double count.
    std::vector<Int> localSizeDispls( mpisize );
    localSizeDispls[0] = 0;
    for( Int ip = 1; ip < mpisize; ip++ ){
      localSizeDispls[ip] = localSizeDispls[ip-1] + localSizeVec[ip-1];
    }
    Int totalSize = localSizeDispls[mpisize-1] + localSizeVec[mpisize-1];

    allVec.clear();
    allVec.resize( totalSize / sizeof(T) );

    MPI_Gatherv( &localVec[0], localSize, MPI_BYTE, &allVec[0],
        &localSizeVec[0], &localSizeDispls[0], MPI_BYTE, root, comm );
  }
  else{
    // The receive arguments are ignored on non-root ranks.
    MPI_Gatherv( &localVec[0], localSize, MPI_BYTE, NULL,
        NULL, NULL, MPI_BYTE, root, comm );
  }

  return ;
} // ----- end of function Gatherv -----
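
// Example (illustrative sketch, not part of the declared API): gather each
// rank's locally owned indices onto rank 0.  On the root, allIdx holds the
// concatenation of the local vectors in rank order; on other ranks it is
// left untouched.  Assumes an initialized MPI environment; the variable
// names are hypothetical.
//
//   std::vector<Int> localIdx( numLocal );   // filled with local indices
//   std::vector<Int> allIdx;
//   mpi::Gatherv( localIdx, allIdx, 0, MPI_COMM_WORLD );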


void
Gatherv (
    std::vector<Int>& localVec,
    std::vector<Int>& allVec,
    std::vector<Int>& sizes,
    std::vector<Int>& displs,
    Int root,
    MPI_Comm comm );

template<typename T>
void
Gatherv (
    std::vector<T>& localVec,
    std::vector<T>& allVec,
    std::vector<Int>& sizes,
    std::vector<Int>& displs,
    Int root,
    MPI_Comm comm )
{
  Int mpirank, mpisize;
  MPI_Comm_rank( comm, &mpirank );
  MPI_Comm_size( comm, &mpisize );

  Int localSize = localVec.size()*sizeof(T);

  if(mpirank==root){
    // sizes and displs are returned to the caller, in bytes, on the root.
    std::vector<Int>& localSizeVec = sizes;
    localSizeVec.resize( mpisize );
    MPI_Gather( &localSize, 1, MPI_INT, &localSizeVec[0], 1, MPI_INT, root, comm );
    std::vector<Int>& localSizeDispls = displs;
    localSizeDispls.resize( mpisize );
    // localSizeVec already holds byte counts, so the displacements are a
    // plain prefix sum.
    localSizeDispls[0] = 0;
    for( Int ip = 1; ip < mpisize; ip++ ){
      localSizeDispls[ip] = localSizeDispls[ip-1] + localSizeVec[ip-1];
    }
    Int totalSize = localSizeDispls[mpisize-1] + localSizeVec[mpisize-1];

    allVec.clear();
    allVec.resize( totalSize / sizeof(T) );

    MPI_Gatherv( &localVec[0], localSize, MPI_BYTE, &allVec[0],
        &localSizeVec[0], &localSizeDispls[0], MPI_BYTE, root, comm );
  }
  else{
    MPI_Gather( &localSize, 1, MPI_INT, NULL, 1, MPI_INT, root, comm );
    // The receive arguments are ignored on non-root ranks.
    MPI_Gatherv( &localVec[0], localSize, MPI_BYTE, NULL,
        NULL, NULL, MPI_BYTE, root, comm );
  }

  return ;
} // ----- end of function Gatherv -----
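
// Example (illustrative sketch): this overload additionally returns the
// per-rank byte counts and byte displacements computed on the root, which
// is convenient when a matching scatter of replies must be staged later.
// The variable names are hypothetical.
//
//   std::vector<Real> localVal( numLocal );
//   std::vector<Real> allVal;
//   std::vector<Int>  sizes, displs;   // filled on the root only
//   mpi::Gatherv( localVal, allVal, sizes, displs, 0, MPI_COMM_WORLD );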


// *********************************************************************
// Allgatherv
//
// NOTE: The interface is quite preliminary.
// *********************************************************************
void Allgatherv(
    std::vector<Int>& localVec,
    std::vector<Int>& allVec,
    MPI_Comm comm );

template <typename T>
void
Allgatherv (
    std::vector<T>& localVec,
    std::vector<T>& allVec,
    MPI_Comm comm )
{
  Int mpirank, mpisize;
  MPI_Comm_rank( comm, &mpirank );
  MPI_Comm_size( comm, &mpisize );

  // Exchange the per-rank byte counts among all ranks.
  Int localSize = localVec.size()*sizeof(T);
  std::vector<Int> localSizeVec( mpisize );
  std::vector<Int> localSizeDispls( mpisize );
  MPI_Allgather( &localSize, 1, MPI_INT, &localSizeVec[0], 1, MPI_INT, comm );
  // localSizeVec already holds byte counts, so the displacements are a plain
  // prefix sum; scaling the sum by sizeof(T) would compound the element size.
  localSizeDispls[0] = 0;
  for( Int ip = 1; ip < mpisize; ip++ ){
    localSizeDispls[ip] = localSizeDispls[ip-1] + localSizeVec[ip-1];
  }
  Int totalSize = localSizeDispls[mpisize-1] + localSizeVec[mpisize-1];

  allVec.clear();
  allVec.resize( totalSize/sizeof(T) );

  MPI_Allgatherv( &localVec[0], localSize, MPI_BYTE, &allVec[0],
      &localSizeVec[0], &localSizeDispls[0], MPI_BYTE, comm );

  return ;
} // ----- end of function Allgatherv -----
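
// Example (illustrative sketch): make every rank's local list visible on
// all ranks.  After the call, allIdx is identical on every rank and holds
// the local vectors concatenated in rank order.  Names are hypothetical.
//
//   std::vector<Int> localIdx( numLocal, mpirank );
//   std::vector<Int> allIdx;
//   mpi::Allgatherv( localIdx, allIdx, MPI_COMM_WORLD );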


template <typename T>
void
Bcast (
    std::vector<T>& dataVec,
    Int root,
    MPI_Comm comm )
{
  Int mpirank, mpisize;
  MPI_Comm_rank( comm, &mpirank );
  MPI_Comm_size( comm, &mpisize );

  // First broadcast the length, then the raw contents as bytes.
  Int localSize = dataVec.size();
  MPI_Bcast( &localSize, sizeof(localSize), MPI_BYTE, root, comm );

  if(mpirank!=root){
    dataVec.clear();
    dataVec.resize(localSize);
  }

  MPI_Bcast( &dataVec[0], localSize*sizeof(T), MPI_BYTE, root, comm );

  return ;
} // ----- end of function Bcast -----
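
// Example (illustrative sketch): broadcast a vector whose length is known
// only on the root; non-root ranks are resized automatically before the
// contents arrive.  ReadParams is a hypothetical helper.
//
//   std::vector<Real> params;
//   if( mpirank == 0 ) params = ReadParams();
//   mpi::Bcast( params, 0, MPI_COMM_WORLD );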
242 
243 
244 
245 
246 
247 
248 
249 
250 
251 
252 // *********************************************************************
253 // Send / Recv for stringstream
254 //
255 // Isend / Irecv is not here because the size and content has to be
256 // communicated separately for non-blocking communication.
257 // *********************************************************************
258 
void Send( std::stringstream& sstm, Int dest, Int tagSize, Int tagContent,
    MPI_Comm comm );

void Recv ( std::stringstream& sstm, Int src, Int tagSize, Int tagContent,
    MPI_Comm comm, MPI_Status& statSize, MPI_Status& statContent );

void Recv ( std::stringstream& sstm, Int src, Int tagSize, Int tagContent,
    MPI_Comm comm );

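// Example (illustrative sketch): ship a serialized object between two ranks
// using a pair of tags, one for the size message and one for the content.
// The tag values and the serialize/deserialize helpers are hypothetical.
//
//   std::stringstream sstm;
//   if( mpirank == 0 ){
//     serialize( obj, sstm );
//     mpi::Send( sstm, 1, 0 /*tagSize*/, 1 /*tagContent*/, MPI_COMM_WORLD );
//   }
//   else if( mpirank == 1 ){
//     mpi::Recv( sstm, 0, 0, 1, MPI_COMM_WORLD );
//     deserialize( obj, sstm );
//   }
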
// *********************************************************************
// Waitall
// *********************************************************************

void
Wait ( MPI_Request& req );

void
Waitall ( std::vector<MPI_Request>& reqs, std::vector<MPI_Status>& stats );

void
Waitall ( std::vector<MPI_Request>& reqs );

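// Example (illustrative sketch): complete a batch of nonblocking receives
// started with plain MPI calls.  reqs collects one MPI_Request per Irecv;
// buf, len, src, tag, and numMsg are hypothetical.
//
//   std::vector<MPI_Request> reqs( numMsg );
//   for( Int i = 0; i < numMsg; i++ ){
//     MPI_Irecv( &buf[i][0], len[i], MPI_BYTE, src[i], tag, comm, &reqs[i] );
//   }
//   mpi::Waitall( reqs );
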
// *********************************************************************
// Reduce
// *********************************************************************

void
Reduce ( Real* sendbuf, Real* recvbuf, Int count, MPI_Op op, Int root, MPI_Comm comm );

void
Reduce ( Complex* sendbuf, Complex* recvbuf, Int count, MPI_Op op, Int root, MPI_Comm comm );


#ifdef _USE_MPI3_
void
Ireduce ( Real* sendbuf, Real* recvbuf, Int count, MPI_Op op, Int root, MPI_Comm comm, MPI_Request & request );

void
Ireduce ( Complex* sendbuf, Complex* recvbuf, Int count, MPI_Op op, Int root, MPI_Comm comm, MPI_Request & request );
#endif

void
Allreduce ( Int* sendbuf, Int* recvbuf, Int count, MPI_Op op, MPI_Comm comm );

void
Allreduce ( Real* sendbuf, Real* recvbuf, Int count, MPI_Op op, MPI_Comm comm );

void
Allreduce ( Complex* sendbuf, Complex* recvbuf, Int count, MPI_Op op, MPI_Comm comm );

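// Example (illustrative sketch): sum a Complex accumulator across all ranks
// using the Complex overload above, avoiding a hand-written MPI_Allreduce
// for complex data.  numPole is a hypothetical count.
//
//   std::vector<Complex> localTrace( numPole );
//   std::vector<Complex> trace( numPole );
//   mpi::Allreduce( &localTrace[0], &trace[0], numPole, MPI_SUM,
//       MPI_COMM_WORLD );
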
// *********************************************************************
// Alltoall
// *********************************************************************

void
Alltoallv ( Int *bufSend, Int *sizeSend, Int *displsSend,
    Int *bufRecv, Int *sizeRecv,
    Int *displsRecv, MPI_Comm comm );

void
Alltoallv ( Real *bufSend, Int *sizeSend, Int *displsSend,
    Real *bufRecv, Int *sizeRecv,
    Int *displsRecv, MPI_Comm comm );

void
Alltoallv ( Complex *bufSend, Int *sizeSend, Int *displsSend,
    Complex *bufRecv, Int *sizeRecv,
    Int *displsRecv, MPI_Comm comm );

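// Example (illustrative sketch): exchange variable-sized blocks of indices
// between all ranks.  sizeSend/displsSend describe the outgoing layout and
// sizeRecv/displsRecv the incoming one; each array has length mpisize and
// is assumed to be consistent across ranks.
//
//   mpi::Alltoallv( &bufSend[0], &sizeSend[0], &displsSend[0],
//       &bufRecv[0], &sizeRecv[0], &displsRecv[0], MPI_COMM_WORLD );
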
} // namespace mpi

} // namespace PEXSI

#endif // _PEXSI_MPI_HPP_