G4MPIscorerMerger.cc
//
// ********************************************************************
// * License and Disclaimer                                           *
// *                                                                  *
// * The Geant4 software is copyright of the Copyright Holders of     *
// * the Geant4 Collaboration. It is provided under the terms and     *
// * conditions of the Geant4 Software License, included in the file  *
// * LICENSE and available at http://cern.ch/geant4/license . These   *
// * include a list of copyright holders.                             *
// *                                                                  *
// * Neither the authors of this software system, nor their employing *
// * institutes,nor the agencies providing financial support for this *
// * work make any representation or warranty, express or implied,    *
// * regarding this software system or assume any liability for its   *
// * use. Please see the license in the file LICENSE and URL above    *
// * for the full disclaimer and the limitation of liability.         *
// *                                                                  *
// * This code implementation is the result of the scientific and     *
// * technical work of the GEANT4 collaboration.                      *
// * By using, copying, modifying or distributing the software (or    *
// * any work based on the software) you agree to acknowledge its     *
// * use in resulting scientific publications, and indicate your      *
// * acceptance of all terms of the Geant4 Software license.          *
// ********************************************************************
//
#include "G4MPIscorerMerger.hh"
#include <map>
#include <ostream>
#include <algorithm>
#include <assert.h>
#include <functional>
#include "G4MPIutils.hh"

#define DMSG( LVL , MSG ) { if ( verbose > LVL ) { G4cout << MSG << G4endl; } }
35
36namespace {
37 //This class extends G4StatDouble to get access
38 //to all data-members of the base class.
39 //It provides two functions: pack and unpack
40 //into a buffer for sending/receiving via MPI
41 struct MPIStatDouble : public G4StatDouble {
42 G4int verbose;
43 inline void Pack(void* buffer,int bufferSize,int* position , MPI::Intracomm& comm ) const
44 {
45 DMSG(4,"Packing G4StatDouble(n,scale,sum_w,sum_w2,sum_wx,sum_wx2): "
46 <<m_n<<" "<<m_scale<<" "<<m_sum_w<<" "<<m_sum_w2
47 <<" "<<m_sum_wx<<" "<<m_sum_wx2);
48 MPI_Pack(&m_n,1,MPI::INT,buffer,bufferSize,position,comm);
49 const G4double data[]{m_scale,m_sum_w,m_sum_w2,m_sum_wx,m_sum_wx2};
50 MPI_Pack(&data,5,MPI::DOUBLE,buffer,bufferSize,position,comm);
51 }
52 inline void UnPack(void* buffer,int bufferSize,int* position , MPI::Intracomm& comm ) {
53 MPI_Unpack(buffer,bufferSize,position,&m_n,1,MPI::INT,comm);
54 G4double data[5];
55 MPI_Unpack(buffer,bufferSize,position,data,5,MPI::DOUBLE,comm);
56 m_scale = data[0];
57 m_sum_w = data[1];
58 m_sum_w2= data[2];
59 m_sum_wx= data[3];
60 m_sum_wx2=data[4];
61 DMSG(4,"UnPacking G4StatDouble(n,scale,sum_w,sum_w2,sum_wx,sum_wx2): "
62 <<m_n<<" "<<m_scale<<" "<<m_sum_w<<" "<<m_sum_w2
63 <<" "<<m_sum_wx<<" "<<m_sum_wx2);
64 }
65 MPIStatDouble(G4int ver = 0) : verbose(ver) {}
66 MPIStatDouble(const G4StatDouble& rhs , G4int ver) : verbose(ver)
67 {
68 G4StatDouble::operator=(rhs);
69 }
70 };
71}
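// Illustrative round-trip for the helper above (a minimal sketch, not part of
// the original file; 'someStat', 'buf' and 'comm' are hypothetical names):
//
//   G4StatDouble someStat;                        // filled by a scorer
//   char buf[64]; int pos = 0;
//   MPIStatDouble out(someStat, /*verbose=*/0);
//   out.Pack(buf, sizeof(buf), &pos, comm);       // writes m_n plus 5 doubles
//   int rpos = 0;
//   MPIStatDouble in(/*verbose=*/0);
//   in.UnPack(buf, sizeof(buf), &rpos, comm);     // restores the statistics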

G4MPIscorerMerger::G4MPIscorerMerger() :
  outputBuffer(nullptr),outputBufferSize(0),outputBufferPosition(0),bytesSent(0),
  ownsBuffer(false),scoringManager(nullptr),commSize(0),
  destinationRank(G4MPImanager::kRANK_MASTER),verbose(0)
{}

G4MPIscorerMerger::G4MPIscorerMerger(G4ScoringManager* mgr,
                                     G4int destination,
                                     G4int verbosity) :
  outputBuffer(nullptr),outputBufferSize(0),outputBufferPosition(0),bytesSent(0),
  ownsBuffer(false),
  scoringManager(mgr), commSize(0), destinationRank(destination),
  verbose(verbosity)
{
}

G4MPIscorerMerger::~G4MPIscorerMerger() {
  //Release the communication buffer if this instance owns it
  if ( ownsBuffer ) delete[] outputBuffer;
}

/* Format of the message.
 *
 * Input:
 *  A vector of G4VScoringMesh, each of which is a
 *  std::map<name:G4String,G4THitsMap<G4double>*> where
 *  G4THitsMap<T> = std::map<int,T*>
 *
 * Output:
 *  A buffer:
 *  [0]   : numMesh            : int    (**Begin Message**)
 *  [1]   : meshID             : int    (**Begin Mesh**)
 *  [2]   : numMaps            : int
 *  [3]   : sizeName           : int    (**Begin Map**)
 *  [4]   : name[0]            : char
 *  ...
 *  [...] : name[sizeName-1]   : char
 *  [...] : mapSize            : int
 *  [...] : THitsMap.keys()[0] : int
 *  ...
 *  [...] : THitsMap.keys()[mapSize-1]   : int
 *  [...] : THitsMap.values()[0]         : double
 *  ...
 *  [...] : THitsMap.values()[mapSize-1] : double (**End Map**)
 *  [...] : Next Map  : repeat from (**Begin Map**)
 *  ...
 *  [...] : Next Mesh : repeat from (**Begin Mesh**)
 *
 * A worked layout for a hypothetical small case is sketched below.
 */
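/* Illustration (hypothetical values, not from the original code): one mesh
 * holding a single scorer map named "eDep" with two filled cells would be laid
 * out, following the scheme above, roughly as:
 *
 *   1                   numMesh
 *   0                   meshID
 *   1                   numMaps
 *   4                   sizeName
 *   'e' 'D' 'e' 'p'     name
 *   2                   mapSize
 *   10 42               THitsMap keys (cell indices)
 *   0.5 1.25            THitsMap values (scores)
 *
 * Note: in the current implementation each value is actually a G4StatDouble,
 * packed as one int plus five doubles (see MPIStatDouble::Pack), rather than a
 * single double.
 */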

void G4MPIscorerMerger::Merge() {
  DMSG(0, "G4MPIscorerMerger::Merge called");
  const unsigned int myrank = G4MPImanager::GetManager()->GetRank();
  commSize = G4MPImanager::GetManager()->GetActiveSize();
  if ( commSize == 1 ) {
    DMSG(1,"Comm world size is 1, nothing to do");
    return;
  }
  const MPI::Intracomm* parentComm = G4MPImanager::GetManager()->GetComm();
  comm = parentComm->Dup();

  //ANDREA:->
//  G4cout<<"Before sending: "<<G4endl;
//  scoringManager->GetMesh(0)->Dump();
//  for ( int i = 0 ; i < scoringManager->GetNumberOfMesh() ; ++i ) {
//    for ( auto e : scoringManager->GetMesh(i)->GetScoreMap() )
//      {
//        G4cout<<e.first<<" : "<<e.second<<G4endl;
//        for ( auto c: *(e.second->GetMap()) ) {
//          G4cout<<c.first<<"="<<*c.second<<G4endl;
//
//        }
//      }
//  }
  //ANDREA:<-

  bytesSent=0;
  const G4double sttime = MPI::Wtime();

  //Use G4MPIutils to optimize communications between ranks
  typedef std::function<void(unsigned int)> handler_t;
  using std::placeholders::_1;
  handler_t sender = std::bind(&G4MPIscorerMerger::Send , this , _1);
  handler_t receiver = std::bind(&G4MPIscorerMerger::Receive, this, _1);
  std::function<void(void)> barrier = std::bind(&MPI::Intracomm::Barrier,&comm);
  G4mpi::Merge( sender , receiver , barrier , commSize , myrank );

  //OLD Style p2p communications
  /*
  if ( myrank != destinationRank ) {
    DMSG(1,"Comm world size: "<<commSize<<" this rank is: "
         <<myrank<<" sending to rank "<<destinationRank
         <<" Number of mesh: "<< scoringManager->GetNumberOfMesh() );
    Send(destinationRank);
  } else {
    DMSG(1,"Comm world size: "<<commSize<<" this rank is: "
         <<myrank<<" receiving "
         <<" Number of mesh: "<< scoringManager->GetNumberOfMesh() );
    for ( unsigned int i = 0 ; i < commSize ; ++i ) {
      if ( i != myrank ) Receive(i);
    }
  }
  */
  const G4double elapsed = MPI::Wtime() - sttime;
  long total=0;
  comm.Reduce(&bytesSent,&total,1,MPI::LONG,MPI::SUM,destinationRank);
  if ( verbose > 0 && myrank == destinationRank ) {
    //Collect from ranks how much data was sent around
    G4cout<<"G4MPIscorerMerger::Merge() -data transfer performances: "
          <<double(total)/1000./elapsed<<" kB/s"
          <<" (Total Data Transfer= "<<double(total)/1000.<<" kB in "
          <<elapsed<<" s)."<<G4endl;
  }
  //ANDREA:->
//  G4cout<<"After Receiving: "<<G4endl;
//  scoringManager->GetMesh(0)->Dump();
//  for ( int i = 0 ; i < scoringManager->GetNumberOfMesh() ; ++i ) {
//    for ( auto e : scoringManager->GetMesh(i)->GetScoreMap() )
//      {
//        G4cout<<e.first<<" : "<<e.second<<G4endl;
//        for ( auto c: *(e.second->GetMap()) ) {
//          G4cout<<c.first<<"="<<*c.second<<" (=2x"<<.5*(*c.second)<<")"<<G4endl;
//
//        }
//      }
//  }
  //ANDREA:<-
  comm.Free();
  DMSG(0,"G4MPIscorerMerger::Merge done.");
}
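// A minimal usage sketch (hypothetical call site; in the Geant4 MPI examples
// the merger is normally driven by the MPI run-merging machinery at the end of
// a run, so the exact invocation may differ):
//
//   G4MPIscorerMerger merger( G4ScoringManager::GetScoringManagerIfExist(),
//                             G4MPImanager::kRANK_MASTER,
//                             /*verbosity=*/0 );
//   merger.Merge();   // collective: every active rank must call it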

void G4MPIscorerMerger::Receive(const unsigned int source) {
  DMSG(1,"Receiving scorers");
  // DestroyBuffer();
  DMSG(2,"Receiving from: "<<source);
  MPI::Status status;
  comm.Probe(source, G4MPImanager::kTAG_CMDSCR, status);
  const G4int newbuffsize = status.Get_count(MPI::PACKED);
  DMSG(2,"Preparing to receive buffer of size: "<<newbuffsize);
  char* buffer = outputBuffer;
  if ( newbuffsize > outputBufferSize ) {
    DMSG(3,"New larger buffer expected, resize");
    //New larger buffer incoming, recreate buffer
    //TODO: use realloc?
    delete[] outputBuffer;
    buffer = new char[newbuffsize];
    //Avoid complaints from valgrind (not really sure why this is needed but,
    //besides the small CPU penalty, we can live with it)
    std::fill( buffer , buffer + newbuffsize , 0 );
    ownsBuffer = true;
  }
  SetupOutputBuffer(buffer,newbuffsize,0);
  comm.Recv(buffer, newbuffsize, MPI::PACKED, source,
            G4MPImanager::kTAG_CMDSCR, status);
  DMSG(3,"Buffer Size: "<<outputBufferSize<< " bytes at: "<<(void*)outputBuffer);
  UnPackAndMerge(scoringManager);
  DMSG(1,"Receiving of command line scorers done");
}

void G4MPIscorerMerger::Send(const unsigned int destination) {
  DMSG(1,"Sending scorers "<<this);
  //Step 1: Setup buffer to pack/unpack data
  const G4int newbuffsize = CalculatePackSize(scoringManager);
  //DestroyBuffer();
  char* buffer = outputBuffer;
  if ( newbuffsize > outputBufferSize ) {
    delete[] outputBuffer;
    buffer = new char[newbuffsize];
    //Avoid complaints from valgrind (not really sure why this is needed but,
    //besides the small CPU penalty, we can live with it)
    std::fill( buffer , buffer+newbuffsize,0);
    ownsBuffer = true;
  }
  SetupOutputBuffer(buffer,newbuffsize,0);
  DMSG(3,"Buffer Size: "<<newbuffsize<< " bytes at: "<<(void*)outputBuffer);
  //Step 2: Pack the meshes into the output buffer
  Pack(scoringManager);

  //Version 1: p2p communication
  comm.Send(outputBuffer, outputBufferSize, MPI::PACKED, destination, G4MPImanager::kTAG_CMDSCR);
  bytesSent += newbuffsize;
  //Receiver should use probe to get size of the package being sent
  DMSG(1,"Sending done");
}

void G4MPIscorerMerger::Pack(const G4ScoringManager* sm) {
  assert(sm!=nullptr);
  if ( outputBuffer == nullptr || outputBufferSize==0 ) {
    G4Exception("G4MPIscorerMerger::Pack(const G4ScoringManager*)",
                "MPI001",FatalException,
                "Call SetupOutputBuffer before trying to pack");
    return;
  }
  DMSG(2,"Starting packing of meshes, # meshes: "<<sm->GetNumberOfMesh());
  /*const*/ size_t numMeshes=sm->GetNumberOfMesh();//TODO: OLD MPI interface
  MPI_Pack(&numMeshes,1,MPI::UNSIGNED,
           outputBuffer,outputBufferSize,
           &outputBufferPosition,
           comm);
  for (size_t i = 0; i <numMeshes; ++i)
    {
      MPI_Pack(&i,1,MPI::UNSIGNED,
               outputBuffer,outputBufferSize,
               &outputBufferPosition,comm);
      Pack(sm->GetMesh(i));
    }
}

void G4MPIscorerMerger::UnPackAndMerge(const G4ScoringManager* sm) {
  assert(sm!=nullptr);
  if ( outputBuffer == nullptr || outputBufferSize==0 ) {
    G4Exception("G4MPIscorerMerger::UnPack(const G4ScoringManager*)",
                "MPI001",FatalException,
                "Call SetupOutputBuffer before trying to un-pack");
    return;
  }
  size_t numMeshes=0;
  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
             &numMeshes,1,MPI::UNSIGNED,comm);
  if ( numMeshes != sm->GetNumberOfMesh() ) {
    G4ExceptionDescription msg;
    msg << "Number of meshes to unpack ("<<numMeshes;
    msg <<") does not correspond to expected number ("<<sm->GetNumberOfMesh();
    msg<<")";
    G4Exception("G4MPIscorerMerger::UnPack(const G4ScoringManager*)",
                "MPI001",FatalException,msg);
    return;
  }

  size_t meshid=0;
  for ( size_t i = 0 ; i < numMeshes ; ++i ) {
    MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
               &meshid,1,MPI::UNSIGNED,comm);
    if ( meshid != i ) {
      G4ExceptionDescription msg;
      msg<<"Cannot unpack: expecting mesh "<<i<<" and found "<<meshid;
      msg<<" during unpack.";
      G4Exception("G4MPIscorerMerger::UnPack(const G4ScoringManager*)",
                  "MPI001",FatalException,msg);
      return;
    }
    G4VScoringMesh* original = sm->GetMesh(i);
    UnPackAndMerge(original);
  }
}

void G4MPIscorerMerger::Pack(const G4VScoringMesh* mesh) {
  assert(mesh!=nullptr);
  assert(outputBuffer!=nullptr);
  assert(outputBufferPosition<=outputBufferSize);
  DMSG(3,"Packing mesh: "<<mesh);

  auto map = mesh->GetScoreMap();
  /*const*/ size_t nummaps = map.size();//TODO: old MPI interface
  MPI_Pack(&nummaps,1,MPI::UNSIGNED,
           outputBuffer,outputBufferSize,
           &outputBufferPosition,comm);
  for ( const auto& ele: map ) {
    const G4String& name = ele.first;
    /*const*/ size_t ss = name.size();
    MPI_Pack(&ss,1,MPI::UNSIGNED,
             outputBuffer,outputBufferSize,
             &outputBufferPosition,comm);
#ifdef G4MPI_USE_MPI_PACK_NOT_CONST
    char* nn = new char[name.length()];
    std::copy(name.begin(),name.end(),nn);
#else
    const char* nn = name.c_str();
#endif
    MPI_Pack(nn,ss,MPI::CHAR,outputBuffer,outputBufferSize,&outputBufferPosition,comm);
    Pack(ele.second);
#ifdef G4MPI_USE_MPI_PACK_NOT_CONST
    delete[] nn;
#endif
  }
}

void G4MPIscorerMerger::UnPackAndMerge(G4VScoringMesh* inmesh) {
  assert(outputBuffer!=nullptr);
  assert(outputBufferPosition<=outputBufferSize);
  assert(inmesh!=nullptr);
  DMSG(3,"Preparing to unpack a mesh and merge into: "<<inmesh);
  const G4String& detName = inmesh->GetWorldName();
  size_t nummaps = 0;
  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
             &nummaps,1,MPI::UNSIGNED,comm);
  for ( size_t i = 0 ; i < nummaps ; ++i ) {
    size_t nameSize = 0;
    MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
               &nameSize,1,MPI::UNSIGNED,comm);
    //Create a null-terminated c-string: needed later when converting this to a G4String
    //(Not sure: but issue reported by valgrind with the use of MPI_Unpack)
    char* name = new char[nameSize+1];
    std::fill(name,name+nameSize+1,0);
    MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
               name,nameSize,MPI::CHAR,comm);
    const G4String colname(name,nameSize);
    delete[] name;
    //This memory churn is very inefficient, but we cannot reuse the HitMap
    //because we cannot change the names
    //TODO: Evaluate change in HitMap class to allow for change of names
    //HitMap* hm = UnPackHitMap(detName,colname);
    HitStatDoubleMap* hm = UnPackHitStatDoubleMap(detName,colname);
    inmesh->Accumulate(hm);
    delete hm;
  }
}


//void G4MPIscorerMerger::Pack(const HitMap* sm) {
//  assert(sm!=nullptr);
//  assert(outputBuffer!=nullptr);
//  assert(outputBufferPosition<=outputBufferSize);
//  DMSG(3,"Packing hitmap: "<<sm<<" with: "<<sm->GetSize()<<" elements.");
//  /*const*/ size_t numEl = sm->GetSize();//TODO: old MPI implementation
//  MPI_Pack(&numEl,1,MPI::UNSIGNED,
//           outputBuffer,outputBufferSize,
//           &outputBufferPosition,comm);
//  const auto& theMap = *sm->GetMap();
//  std::vector<G4int> ids;
//  std::vector<G4double> vals;
//  std::transform(theMap.begin(),theMap.end(),std::back_inserter(ids),
//                 [](decltype(*theMap.begin())& e){ return e.first;});
//  std::transform(theMap.begin(),theMap.end(),std::back_inserter(vals),
//                 [](decltype(*theMap.begin())& e){ return *e.second;});
//  assert(ids.size()==vals.size()&&ids.size()==numEl);
//  MPI_Pack(ids.data(),ids.size(),MPI::INT,
//           outputBuffer,outputBufferSize,
//           &outputBufferPosition,comm);
//  MPI_Pack(vals.data(),vals.size(),MPI::DOUBLE,
//           outputBuffer,outputBufferSize,
//           &outputBufferPosition,comm);
//}

void G4MPIscorerMerger::Pack(const HitStatDoubleMap* sm) {
  assert(sm!=nullptr);
  assert(outputBuffer!=nullptr);
  assert(outputBufferPosition<=outputBufferSize);
  DMSG(3,"Packing hitmap: "<<sm<<" with: "<<sm->GetSize()<<" elements.");
  /*const*/ size_t numEl = sm->GetSize();//TODO: old MPI implementation
  MPI_Pack(&numEl,1,MPI::UNSIGNED,
           outputBuffer,outputBufferSize,
           &outputBufferPosition,comm);
  const auto& theMap = *sm->GetMap();
  std::vector<G4int> ids;
  std::transform(theMap.begin(),theMap.end(),std::back_inserter(ids),
                 [](decltype(*theMap.begin())& e){ return e.first;});
  assert(/*ids.size()==vals.size()&&*/ids.size()==numEl);
  MPI_Pack(ids.data(),ids.size(),MPI::INT,outputBuffer,outputBufferSize,
           &outputBufferPosition,comm);
  for( const auto& e : theMap) {
    const MPIStatDouble sd(*e.second,verbose);
    sd.Pack(outputBuffer,outputBufferSize,&outputBufferPosition,comm);
  }
}

//HitMap* G4MPIscorerMerger::UnPackHitMap(const G4String& detName,
//                                        const G4String& colName) {
//  assert(outputBuffer!=nullptr);
//  assert(outputBufferPosition<=outputBufferSize);
//  DMSG(3,"Preparing to unpack a hit map for: "<<detName<<","<<colName);
//  size_t numEl =0 ;
//  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
//             &numEl,1,MPI::UNSIGNED,comm);
//  G4int* ids = new G4int[numEl];
//  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
//             ids,numEl,MPI::INT,comm);
//  G4double* vals = new G4double[numEl];
//  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
//             vals,numEl,MPI::DOUBLE,comm);
//  HitMap* result = new HitMap(detName,colName);
//  for ( unsigned int i = 0; i<numEl;++i) result->set(ids[i],vals[i]);
//  delete[] ids;
//  delete[] vals;
//  return result;
//}

HitStatDoubleMap* G4MPIscorerMerger::UnPackHitStatDoubleMap(
    const G4String& detName, const G4String& colName)
{
  assert(outputBuffer!=nullptr);
  assert(outputBufferPosition<=outputBufferSize);
  DMSG(3,"Preparing to unpack a hit map for: "<<detName<<","<<colName);
  size_t numEl =0 ;
  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
             &numEl,1,MPI::UNSIGNED,comm);
  DMSG(3,"Will receive "<<numEl<<" values");
  G4int* ids = new G4int[numEl];
  MPI_Unpack(outputBuffer,outputBufferSize,&outputBufferPosition,
             ids,numEl,MPI::INT,comm);
  HitStatDoubleMap* result = new HitStatDoubleMap(detName,colName);
  for ( unsigned int i = 0; i<numEl;++i) {
    MPIStatDouble sd(verbose);
    sd.UnPack(outputBuffer,outputBufferSize,&outputBufferPosition,comm);
    result->set(ids[i],sd);
  }
  delete[] ids;
  return result;
}


G4int G4MPIscorerMerger::CalculatePackSize(const G4ScoringManager* sm) const
{
  DMSG(3,"Calculating dimension of data to send");
  if ( sm == nullptr ) return 0;
  //Calculate how much data each call to Pack* appends to the buffer,
  //e.g. sizeof(data).
  //The number of sizeof here should match the number of calls to MPI_Pack

  //Pack(ScoringMgr)
  G4int size = sizeof(unsigned int);
  DMSG(3,"There are "<<sm->GetNumberOfMesh()<<" meshes.");
  //Loop on mesh
  for ( size_t i = 0 ; i<sm->GetNumberOfMesh() ; ++i ) {
    size += sizeof(unsigned int);//ID
    size += CalculatePackSize(sm->GetMesh(i));
  }
  return size;
}

G4int G4MPIscorerMerger::CalculatePackSize(const G4VScoringMesh* mesh) const
{
  DMSG(3,"Calculating size for mesh: "<<mesh);
  //PackSingleMesh(Mesh)
  G4int size = sizeof(unsigned int);//num maps
  auto map = mesh->GetScoreMap();
  for (const auto& ele : map ) {
    //PackHitsMap
    size += sizeof(unsigned int);//name size
    const G4String& name = ele.first;
    size += sizeof(char)*name.size();//name
    size += CalculatePackSize(ele.second);
  }
  DMSG(3,"mesh "<<mesh<<" size: "<<size);
  return size;
}

//G4int G4MPIscorerMerger::CalculatePackSize(const HitMap* map) const {
//  const G4int numEls = map->GetSize();
//  G4int size = sizeof(unsigned int);
//  size += sizeof(G4int)*numEls;
//  size += sizeof(G4double)*numEls;
//  DMSG(3,"HitMap "<<map<<" size: "<<size<<" in "<<numEls<<" elements.");
//  return size;
//}

G4int G4MPIscorerMerger::CalculatePackSize(const HitStatDoubleMap* map) const {
  const G4int numEls = map->GetSize();
  G4int size = sizeof(unsigned int);
  size += sizeof(G4int)*numEls;
  //G4StatDouble: 5 doubles and 1 int
  //Can I use sizeof(G4StatDouble)? NO sizeof(G4StatDouble)==56
  size += numEls*(sizeof(G4double)*5+sizeof(G4int));
  DMSG(3,"HitStatDoubleMap "<<map<<" size: "<<size<<" in "<<numEls<<" elements.");
  return size;
}
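// Back-of-the-envelope check of the per-element packed size used above
// (assuming 4-byte G4int and 8-byte G4double, as on common 64-bit platforms):
//
//   key (cell index) : sizeof(G4int)      =  4 bytes
//   m_n              : sizeof(G4int)      =  4 bytes
//   5 statistics     : 5*sizeof(G4double) = 40 bytes
//   total per element                     = 48 bytes
//
// which is smaller than sizeof(G4StatDouble) (56 bytes, because of the vtable
// pointer and padding), hence the explicit accounting instead of sizeof.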