G4MPImanager Class Reference

#include <Doxymodules_parallel.h>

Public Types

enum  { kRANK_MASTER = 0 }
 
enum  {
  kTAG_G4COMMAND = 100 , kTAG_G4STATUS = 200 , kTAG_G4SEED = 300 , kTAG_DATA = 1000 ,
  kTAG_HISTO = 1001 , kTAG_RUN = 1002 , kTAG_CMDSCR = 1003 , kTAG_NTUPLE = 1004
}
 

Public Member Functions

 G4MPImanager (int nof_extra_workers=0)
 
 G4MPImanager (int argc, char **argv, int nof_extra_workers=0)
 
 ~G4MPImanager ()
 
G4MPIsession * GetMPIsession () const
 
G4int GetVerbose () const
 
void SetVerbose (G4int iverbose)
 
G4int GetTotalSize () const
 
G4int GetActiveSize () const
 
G4int GetRank () const
 
G4bool IsMaster () const
 
G4bool IsSlave () const
 
G4bool IsExtraWorker () const
 
G4bool IsInitMacro () const
 
const G4String & GetInitFileName () const
 
G4bool IsBatchMode () const
 
const G4String & GetMacroFileName () const
 
void SetMasterWeight (G4double aweight)
 
G4double GetMasterWeight () const
 
void SetExtraWorker (G4VMPIextraWorker *extraWorker)
 
G4VMPIextraWorker * GetExtraWorker () const
 
G4VMPIseedGenerator * GetSeedGenerator () const
 
G4String BcastCommand (const G4String &command)
 
void ShowStatus ()
 
void ShowSeeds ()
 
void SetSeed (G4int inode, G4long seed)
 
void WaitBeamOn ()
 
void DistributeSeeds ()
 
void ExecuteMacroFile (const G4String &fname, G4bool qbatch=false)
 
G4bool CheckThreadStatus ()
 
void ExecuteThreadCommand (const G4String &command)
 
void ExecuteBeamOnThread (const G4String &command)
 
void JoinBeamOnThread ()
 
void BeamOn (G4int nevent, G4bool qdivide=true)
 
void Print (const G4String &message)
 
G4int GetEventsInMaster () const
 
G4int GetEventsInSlave () const
 
void ShowHelp () const
 
const MPI::Intracomm * GetComm () const
 
const MPI_Comm * GetProcessingComm () const
 
const MPI_Comm * GetCollectingComm () const
 
const MPI_Comm * GetAllComm () const
 

Static Public Member Functions

static G4MPImanager * GetManager ()
 

Private Member Functions

 DISALLOW_COPY_AND_ASSIGN (G4MPImanager)
 
void Initialize ()
 
void ParseArguments (G4int argc, char **argv)
 
void UpdateStatus ()
 

Private Attributes

G4MPImessenger * messenger_
 
G4MPIsession * session_
 
G4VMPIextraWorker * extra_worker_
 
G4VMPIseedGenerator * seed_generator_
 
G4MPIstatus * status_
 
G4int verbose_
 
G4bool is_master_
 
G4bool is_slave_
 
G4bool is_extra_worker_
 
G4int rank_
 
G4int size_
 
G4int world_size_
 
MPI::Intracomm COMM_G4COMMAND_
 
MPI_Comm processing_comm_
 
MPI_Comm collecting_comm_
 
MPI_Comm all_comm_
 
MPI_Group world_group_
 
MPI_Group processing_group_
 
MPI_Group collecting_group_
 
MPI_Group all_group_
 
G4bool qfcout_
 
std::ofstream fscout_
 
G4bool qinitmacro_
 
G4String init_file_name_
 
G4bool qbatchmode_
 
G4String macro_file_name_
 
pthread_t thread_id_
 
G4int fevents_in_master = 0
 
G4int fevents_in_slave = 0
 
G4double master_weight_
 
G4int nof_extra_workers_
 

Static Private Attributes

static G4MPImanager * g4mpi_ = NULL
 

Detailed Description

Definition at line 63 of file Doxymodules_parallel.h.
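
G4MPImanager is the singleton entry point of the Geant4 MPI interface: it initializes MPI, keeps rank bookkeeping, broadcasts UI commands to slaves, and divides events across ranks. A minimal sketch of a main() using it, modeled on the MPI examples shipped with Geant4; the My* user initialization classes are hypothetical placeholders that must be supplied before initializing the run manager.

#include "G4MPImanager.hh"
#include "G4MPIsession.hh"
#include "G4RunManager.hh"

int main(int argc, char** argv)
{
  // Construct the MPI manager first: it calls MPI::Init_thread() internally.
  G4MPImanager* g4MPI = new G4MPImanager(argc, argv);
  G4MPIsession* session = g4MPI->GetMPIsession();

  G4RunManager* runManager = new G4RunManager;
  // runManager->SetUserInitialization(new MyDetectorConstruction);  // hypothetical
  // runManager->SetUserInitialization(new MyPhysicsList);           // hypothetical
  // runManager->SetUserAction(new MyPrimaryGeneratorAction);        // hypothetical
  runManager->Initialize();

  // Enter the MPI session: interactive mode, or batch mode if a macro
  // file was passed on the command line (see ParseArguments()).
  session->SessionStart();

  delete g4MPI;       // calls MPI::Finalize() in the destructor
  delete runManager;
  return 0;
}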

Member Enumeration Documentation

◆ anonymous enum

anonymous enum
Enumerator
kRANK_MASTER 

Definition at line 49 of file G4MPImanager.hh.

49{ kRANK_MASTER = 0 };

◆ anonymous enum

anonymous enum
Enumerator
kTAG_G4COMMAND 
kTAG_G4STATUS 
kTAG_G4SEED 
kTAG_DATA 
kTAG_HISTO 
kTAG_RUN 
kTAG_CMDSCR 
kTAG_NTUPLE 

Definition at line 51 of file G4MPImanager.hh.

51 { // MPI tag
52 kTAG_G4COMMAND = 100,
53 kTAG_G4STATUS = 200,
54 kTAG_G4SEED = 300,
55 kTAG_DATA = 1000,
56 kTAG_HISTO = 1001,
57 kTAG_RUN = 1002,
58 kTAG_CMDSCR = 1003,
59 kTAG_NTUPLE = 1004
60 };

Constructor & Destructor Documentation

◆ G4MPImanager() [1/2]

G4MPImanager::G4MPImanager ( int  nof_extra_workers = 0)

Definition at line 69 of file G4MPImanager.cc.

70 : verbose_(0),
71 COMM_G4COMMAND_(MPI_COMM_NULL), processing_comm_(MPI_COMM_NULL),
72 collecting_comm_(MPI_COMM_NULL), all_comm_(MPI_COMM_NULL),
73 qfcout_(false), qinitmacro_(false), qbatchmode_(false),
74 thread_id_(0), master_weight_(1.), nof_extra_workers_(nof_extra_workers)
75{
76 //MPI::Init();
77 MPI::Init_thread(MPI::THREAD_SERIALIZED);
78 Initialize();
79}

◆ G4MPImanager() [2/2]

G4MPImanager::G4MPImanager ( int  argc,
char **  argv,
int  nof_extra_workers = 0 
)

Definition at line 82 of file G4MPImanager.cc.

83 : verbose_(0),
84 COMM_G4COMMAND_(MPI_COMM_NULL), processing_comm_(MPI_COMM_NULL),
85 collecting_comm_(MPI_COMM_NULL), all_comm_(MPI_COMM_NULL),
86 qfcout_(false), qinitmacro_(false), qbatchmode_(false),
87 thread_id_(0), master_weight_(1.), nof_extra_workers_(nof_extra_workers)
88{
89 //MPI::Init(argc, argv);
90 MPI::Init_thread(argc, argv, MPI::THREAD_SERIALIZED);
91 Initialize();
92 ParseArguments(argc, argv);
93}

◆ ~G4MPImanager()

G4MPImanager::~G4MPImanager ( )

Definition at line 96 of file G4MPImanager.cc.

97{
98 if( is_slave_ && qfcout_ ) fscout_.close();
99
100 delete status_;
101 delete messenger_;
102 delete session_;
103
104 if ( nof_extra_workers_ ) {
105 MPI_Group_free(&world_group_);
106 MPI_Group_free(&processing_group_);
107 MPI_Group_free(&collecting_group_);
108 MPI_Group_free(&all_group_);
109 if (processing_comm_ != MPI_COMM_NULL) {
110 MPI_Comm_free(&processing_comm_);
111 }
112 if (collecting_comm_ != MPI_COMM_NULL) {
113 MPI_Comm_free(&collecting_comm_);
114 }
115 if (all_comm_ != MPI_COMM_NULL) {
116 MPI_Comm_free(&all_comm_);
117 }
118 }
119 else {
120 COMM_G4COMMAND_.Free();
121 }
122
123 MPI::Finalize();
124}

Member Function Documentation

◆ GetManager()

G4MPImanager * G4MPImanager::GetManager ( )
static

Definition at line 127 of file G4MPImanager.cc.

128{
129 if ( g4mpi_ == NULL ) {
130 G4Exception("G4MPImanager::GetManager()", "MPI001",
131 FatalException, "G4MPImanager is not instantiated.");
132 }
133 return g4mpi_;
134}
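
Any code that runs after construction can reach the singleton through this accessor. A small sketch (function name is illustrative):

#include "G4MPImanager.hh"
#include "globals.hh"

// Sketch: query rank information through the singleton accessor.
void ReportRank()
{
  G4MPImanager* g4MPI = G4MPImanager::GetManager();
  G4cout << "rank " << g4MPI->GetRank() << " of "
         << g4MPI->GetTotalSize()
         << (g4MPI->IsMaster() ? " (master)" : "") << G4endl;
}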

◆ GetMPIsession()

G4MPIsession * G4MPImanager::GetMPIsession ( ) const
inline

Definition at line 186 of file G4MPImanager.hh.

187{
188 return session_;
189}

◆ GetVerbose()

G4int G4MPImanager::GetVerbose ( ) const
inline

Definition at line 191 of file G4MPImanager.hh.

192{
193 return verbose_;
194}

◆ SetVerbose()

void G4MPImanager::SetVerbose ( G4int  iverbose)
inline

Definition at line 196 of file G4MPImanager.hh.

197{
198 G4int lv = iverbose;
199 if( iverbose > 1 ) lv = 1;
200 if( iverbose < 0 ) lv = 0;
201
202 verbose_ = lv;
203 return;
204}

◆ GetTotalSize()

G4int G4MPImanager::GetTotalSize ( ) const
inline

Definition at line 211 of file G4MPImanager.hh.

212{
213 return world_size_;
214}

◆ GetActiveSize()

G4int G4MPImanager::GetActiveSize ( ) const
inline

Definition at line 216 of file G4MPImanager.hh.

217{
218 return size_;
219}

◆ GetRank()

G4int G4MPImanager::GetRank ( ) const
inline

Definition at line 206 of file G4MPImanager.hh.

207{
208 return rank_;
209}

◆ IsMaster()

G4bool G4MPImanager::IsMaster ( ) const
inline

Definition at line 221 of file G4MPImanager.hh.

222{
223 return is_master_;
224}

◆ IsSlave()

G4bool G4MPImanager::IsSlave ( ) const
inline

Definition at line 226 of file G4MPImanager.hh.

227{
228 return is_slave_;
229}

◆ IsExtraWorker()

G4bool G4MPImanager::IsExtraWorker ( ) const
inline

Definition at line 231 of file G4MPImanager.hh.

232{
233 return is_extra_worker_;
234}

◆ IsInitMacro()

G4bool G4MPImanager::IsInitMacro ( ) const
inline

Definition at line 236 of file G4MPImanager.hh.

237{
238 return qinitmacro_;
239
240}

◆ GetInitFileName()

const G4String & G4MPImanager::GetInitFileName ( ) const
inline

Definition at line 242 of file G4MPImanager.hh.

243{
244 return init_file_name_;
245
246}

◆ IsBatchMode()

G4bool G4MPImanager::IsBatchMode ( ) const
inline

Definition at line 248 of file G4MPImanager.hh.

249{
250 return qbatchmode_;
251}

◆ GetMacroFileName()

const G4String & G4MPImanager::GetMacroFileName ( ) const
inline

Definition at line 253 of file G4MPImanager.hh.

254{
255 return macro_file_name_;
256}

◆ SetMasterWeight()

void G4MPImanager::SetMasterWeight ( G4double  aweight)
inline

Definition at line 258 of file G4MPImanager.hh.

259{
260 master_weight_ = aweight;
261
262 if( aweight < 0. ) master_weight_ = 0.;
263 if( aweight > 1. ) master_weight_ = 1.;
264}

◆ GetMasterWeight()

G4double G4MPImanager::GetMasterWeight ( ) const
inline

Definition at line 266 of file G4MPImanager.hh.

267{
268 return master_weight_;
269}

◆ SetExtraWorker()

void G4MPImanager::SetExtraWorker ( G4VMPIextraWorker *  extraWorker)

Definition at line 137 of file G4MPImanager.cc.

138{
139 if ( ! nof_extra_workers_ ) {
140 G4Exception("G4MPImanager::SetExtraWorker()", "MPI001",
141 FatalException, "Number of extra workers >0 must be set first.");
142 }
143
144 extra_worker_ = extraWorker;
145}

◆ GetExtraWorker()

G4VMPIextraWorker * G4MPImanager::GetExtraWorker ( ) const
inline

Definition at line 271 of file G4MPImanager.hh.

272{
273 return extra_worker_;
274}

◆ GetSeedGenerator()

G4VMPIseedGenerator * G4MPImanager::GetSeedGenerator ( ) const
inline

Definition at line 276 of file G4MPImanager.hh.

277{
278 return seed_generator_;
279}

◆ BcastCommand()

G4String G4MPImanager::BcastCommand ( const G4String &  command)

Definition at line 515 of file G4MPImanager.cc.

516{
517 // Do nothing if not processing worker
518 if (is_extra_worker_) return G4String("exit");
519
520 enum { kBUFF_SIZE = 512 };
521 static char sbuff[kBUFF_SIZE];
522 command.copy(sbuff, kBUFF_SIZE);
523 G4int len = command.size();
524 sbuff[len] ='\0'; // no boundary check
525
526 // "command" is not yet fixed in slaves at this time.
527
528 // waiting message exhausts CPU in LAM!
529 //COMM_G4COMMAND_.Bcast(sbuff, ssize, MPI::CHAR, RANK_MASTER);
530
531 // another implementation
532 if( is_master_ ) {
533 for ( G4int islave = 1; islave < size_; islave++ ) {
534 COMM_G4COMMAND_.Send(sbuff, kBUFF_SIZE, MPI::CHAR,
535 islave, kTAG_G4COMMAND);
536 }
537 } else {
538 // try non-blocking receive
539 MPI::Request request= COMM_G4COMMAND_.Irecv(sbuff, kBUFF_SIZE, MPI::CHAR,
540 kRANK_MASTER, kTAG_G4COMMAND);
541 // polling...
542 while(! request.Test()) {
543 ::Wait(1000);
544 }
545 }
546
547 return G4String(sbuff);
548}

◆ ShowStatus()

void G4MPImanager::ShowStatus ( )

Definition at line 334 of file G4MPImanager.cc.

335{
336 G4int buff[G4MPIstatus::kNSIZE];
337
338 UpdateStatus();
339 G4bool gstatus = CheckThreadStatus();
340
341 if ( is_master_ ) {
342 status_-> Print(); // for master itself
343
344 G4int nev = status_-> GetEventID();
345 G4int nevtp = status_-> GetNEventToBeProcessed();
346 G4double cputime = status_-> GetCPUTime();
347
348 // receive from each slave
349 for ( G4int islave = 1; islave < size_; islave++ ) {
350 COMM_G4COMMAND_.Recv(buff, G4MPIstatus::kNSIZE, MPI::INT,
351 islave, kTAG_G4STATUS);
352 status_-> UnPack(buff);
353 status_-> Print();
354
355 // aggregation
356 nev += status_-> GetEventID();
357 nevtp += status_-> GetNEventToBeProcessed();
358 cputime += status_-> GetCPUTime();
359 }
360
361 G4String strStatus;
362 if ( gstatus ) {
363 strStatus = "Run";
364 } else {
365 strStatus = "Idle";
366 }
367
368 G4cout << "-------------------------------------------------------"
369 << G4endl
370 << "* #ranks= " << size_
371 << " event= " << nev << "/" << nevtp
372 << " state= " << strStatus
373 << " time= " << cputime << "s"
374 << G4endl;
375 } else {
376 status_-> Pack(buff);
377 COMM_G4COMMAND_.Send(buff, G4MPIstatus::kNSIZE, MPI::INT,
378 kRANK_MASTER, kTAG_G4STATUS);
379 }
380}

◆ ShowSeeds()

void G4MPImanager::ShowSeeds ( )

Definition at line 393 of file G4MPImanager.cc.

394{
395 G4long buff;
396
397 if ( is_master_ ) {
398 // print master
399 G4cout << "* rank= " << rank_
400 << " seed= " << G4Random::getTheSeed()
401 << G4endl;
402 // receive from each slave
403 for ( G4int islave = 1; islave < size_; islave++ ) {
404 COMM_G4COMMAND_.Recv(&buff, 1, MPI::LONG, islave, kTAG_G4SEED);
405 G4cout << "* rank= " << islave
406 << " seed= " << buff
407 << G4endl;
408 }
409 } else { // slaves
410 buff = G4Random::getTheSeed();
411 COMM_G4COMMAND_.Send(&buff, 1, MPI::LONG, kRANK_MASTER, kTAG_G4SEED);
412 }
413}

◆ SetSeed()

void G4MPImanager::SetSeed ( G4int  inode,
G4long  seed 
)

Definition at line 416 of file G4MPImanager.cc.

417{
418 if( rank_ == inode ) {
419 CLHEP::HepRandom::setTheSeed(seed);
420 }
421}

◆ WaitBeamOn()

void G4MPImanager::WaitBeamOn ( )

Definition at line 617 of file G4MPImanager.cc.

618{
619 // G4cout << "G4MPImanager::WaitBeamOn" << G4endl;
620
621 // Extra worker
622 if (is_extra_worker_) {
623 if ( extra_worker_ ) {
624 G4cout << "Calling extra_worker " << G4endl;
625 extra_worker_-> BeamOn();
626 } else {
627 G4cout << " !!!! extra_worker_ is not defined " << G4endl;
628 }
629 return;
630 }
631
632 G4int buff = 0;
633 if ( qbatchmode_ ) { // valid only in batch mode
634 if ( is_master_ ) {
635 // receive from each slave
636 for (G4int islave = 1; islave < size_; islave++) {
637 // G4cout << "calling Irecv for islave " << islave << G4endl;
638 MPI::Request request = COMM_G4COMMAND_.Irecv(&buff, 1, MPI::INT,
639 islave, kTAG_G4STATUS);
640 while(! request.Test()) {
641 ::Wait(1000);
642 }
643 }
644 } else {
645 buff = 1;
646 // G4cout << "calling send for i " << kRANK_MASTER << G4endl;
647 COMM_G4COMMAND_.Send(&buff, 1, MPI::INT, kRANK_MASTER, kTAG_G4STATUS);
648 }
649 }
650}

◆ DistributeSeeds()

void G4MPImanager::DistributeSeeds ( )

Definition at line 383 of file G4MPImanager.cc.

384{
385 // Do nothing if not processing worker
386 if ( is_extra_worker_ ) return;
387
388 std::vector<G4long> seed_list = seed_generator_-> GetSeedList();
389 G4Random::setTheSeed(seed_list[rank_]);
390}
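
DistributeSeeds() assigns each processing rank its own seed from the seed generator; SetSeed() lets you override a single rank afterwards. A sketch combining the two with ShowSeeds() (the literal seed value and function name are illustrative); note that ShowSeeds() must run on all processing ranks, since slaves send their seeds to the master:

#include "G4MPImanager.hh"

// Sketch: override the seed on one rank, then print every rank's seed.
void ReseedRankTwo()
{
  G4MPImanager* g4MPI = G4MPImanager::GetManager();
  g4MPI->SetSeed(2, 12345L);  // applied only where rank_ == 2
  g4MPI->ShowSeeds();         // master prints "* rank= N seed= ..." per rank
}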

◆ ExecuteMacroFile()

void G4MPImanager::ExecuteMacroFile ( const G4String &  fname,
G4bool  qbatch = false 
)

Definition at line 551 of file G4MPImanager.cc.

552{
553 G4bool currentmode = qbatchmode_;
554 qbatchmode_ = true;
555 G4MPIbatch* batchSession = new G4MPIbatch(fname, qbatch);
556 batchSession-> SessionStart();
557 delete batchSession;
558 qbatchmode_ = currentmode;
559}
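
The macro is executed through a temporary G4MPIbatch session, with batch mode forced on for the duration and restored afterwards. A minimal sketch ("run.mac" is a hypothetical file name):

#include "G4MPImanager.hh"

// Sketch: run a macro file via the MPI batch session.
void RunMacro()
{
  G4MPImanager::GetManager()->ExecuteMacroFile("run.mac");
}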

◆ CheckThreadStatus()

G4bool G4MPImanager::CheckThreadStatus ( )

Definition at line 424 of file G4MPImanager.cc.

425{
426 unsigned buff;
427 unsigned qstatus = 0;
428
429 if( is_master_ ) {
430 qstatus = (thread_id_ != 0);
431 // get slave status
432 for ( G4int islave = 1; islave < size_; islave++ ) {
433 MPI::Request request = COMM_G4COMMAND_.Irecv(&buff, 1, MPI::UNSIGNED,
434 islave, kTAG_G4STATUS);
435 while( ! request.Test() ) {
436 ::Wait(1000);
437 }
438 qstatus |= buff;
439 }
440 } else {
441 buff = (thread_id_ !=0);
442 COMM_G4COMMAND_.Send(&buff, 1, MPI::UNSIGNED, kRANK_MASTER, kTAG_G4STATUS);
443 }
444
445 // broadcast
446 buff = qstatus; // for master
447 COMM_G4COMMAND_.Bcast(&buff, 1, MPI::UNSIGNED, kRANK_MASTER);
448 qstatus = buff; // for slave
449
450 if ( qstatus != 0 ) return true;
451 else return false;
452}

◆ ExecuteThreadCommand()

void G4MPImanager::ExecuteThreadCommand ( const G4String &  command)

Definition at line 455 of file G4MPImanager.cc.

456{
457 // this method is a thread function.
458 G4UImanager* UI = G4UImanager::GetUIpointer();
459 G4int rc = UI-> ApplyCommand(command);
460
461 G4int commandStatus = rc - (rc%100);
462
463 switch( commandStatus ) {
464 case fCommandSucceeded:
465 break;
466 case fIllegalApplicationState:
467 G4cerr << "illegal application state -- command refused" << G4endl;
468 break;
469 default:
470 G4cerr << "command refused (" << commandStatus << ")" << G4endl;
471 break;
472 }
473
474 // thread is joined
475 if ( thread_id_ ) {
476 pthread_join(thread_id_, 0);
477 thread_id_ = 0;
478 }
479
480 return;
481}

◆ ExecuteBeamOnThread()

void G4MPImanager::ExecuteBeamOnThread ( const G4String &  command)

Definition at line 484 of file G4MPImanager.cc.

485{
486 G4bool threadStatus = CheckThreadStatus();
487
488 if (threadStatus) {
489 if ( is_master_ ) {
490 G4cout << "G4MPIsession:: beamOn is still running." << G4endl;
491 }
492 } else { // ok
493 static G4String cmdstr;
494 cmdstr = command;
495 G4int rc = pthread_create(&thread_id_, 0,
496 (Func_t)thread_ExecuteThreadCommand,
497 (void*)&cmdstr);
498 if (rc != 0)
499 G4Exception("G4MPImanager::ExecuteBeamOnThread()",
500 "MPI003", FatalException,
501 "Failed to create a beamOn thread.");
502 }
503}

◆ JoinBeamOnThread()

void G4MPImanager::JoinBeamOnThread ( )

Definition at line 506 of file G4MPImanager.cc.

507{
508 if ( thread_id_ ) {
509 pthread_join(thread_id_, 0);
510 thread_id_ = 0;
511 }
512}
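
ExecuteBeamOnThread() launches the beamOn command on a pthread so the master session stays responsive; JoinBeamOnThread() blocks until that run finishes. A sketch (function name is illustrative):

#include "G4MPImanager.hh"

// Sketch: start a run on a background thread, then wait for it to finish.
void AsyncRun()
{
  G4MPImanager* g4MPI = G4MPImanager::GetManager();
  g4MPI->ExecuteBeamOnThread("/run/beamOn 1000");
  // ... other work here; CheckThreadStatus() reports true while running ...
  g4MPI->JoinBeamOnThread();
}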

◆ BeamOn()

void G4MPImanager::BeamOn ( G4int  nevent,
G4bool  qdivide = true 
)

Definition at line 562 of file G4MPImanager.cc.

563{
564 // G4cout << "G4MPImanager::BeamOn " << nevent << G4endl;
565
566#ifndef G4MULTITHREADED
567 G4RunManager* runManager = G4RunManager::GetRunManager();
568#endif
569
570 if ( qdivide ) { // events are divided
571 G4double ntot = master_weight_ + size_ - 1.;
572 G4int nproc = G4int(nevent/ntot);
573 G4int nproc0 = nevent - nproc*(size_ - 1);
574 fevents_in_master = nproc0;
575 fevents_in_slave = nproc;
576
577 if ( verbose_ > 0 && is_master_ ) {
578 G4cout << "#events in master=" << nproc0 << " / "
579 << "#events in slave=" << nproc << G4endl;
580 }
581
582 status_-> StartTimer(); // start timer
583
584#ifdef G4MULTITHREADED
585 G4String str_nevt;
586 if ( is_master_ ) str_nevt = G4UIcommand::ConvertToString(nproc0);
587 else str_nevt = G4UIcommand::ConvertToString(nproc);
588 G4UImanager* UI = G4UImanager::GetUIpointer();
589 UI-> ApplyCommand("/run/beamOn " + str_nevt);
590#else
591 if ( is_master_ ) runManager-> BeamOn(nproc0);
592 else runManager-> BeamOn(nproc);
593#endif
594
595 status_-> StopTimer(); // stop timer
596
597 } else { // same events are generated in each node (for test use)
598 if( verbose_ > 0 && is_master_ ) {
599 G4cout << "#events in master=" << nevent << " / "
600 << "#events in slave=" << nevent << G4endl;
601 }
602 status_-> StartTimer(); // start timer
603
604#ifdef G4MULTITHREADED
605 G4String str_nevt = G4UIcommand::ConvertToString(nevent);
606 G4UImanager* UI = G4UImanager::GetUIpointer();
607 UI-> ApplyCommand("/run/beamOn " + str_nevt);
608#else
609 runManager-> BeamOn(nevent);
610#endif
611
612 status_-> StopTimer(); // stop timer
613 }
614}
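
When qdivide is true, the event count is split according to master_weight_: with ntot = master_weight_ + size_ - 1, each slave processes G4int(nevent/ntot) events and the master takes the remainder. A worked example with illustrative values:

// Illustrative division for BeamOn(1000) with size_ = 4, master_weight_ = 0.5:
//   ntot   = 0.5 + 4 - 1       = 3.5
//   nproc  = G4int(1000 / 3.5) = 285   // events per slave
//   nproc0 = 1000 - 285 * 3    = 145   // events in master
// Total: 145 + 3 * 285 = 1000; lowering master_weight_ shifts events to slaves.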

◆ Print()

void G4MPImanager::Print ( const G4String &  message)

Definition at line 653 of file G4MPImanager.cc.

654{
655 if ( is_master_ ){
656 std::cout << message << std::flush;
657 } else {
658 if ( qfcout_ ) { // output to a file
659 fscout_ << message << std::flush;
660 } else { // output to stdout
661 std::cout << rank_ << ":" << message << std::flush;
662 }
663 }
664}
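
A usage sketch: the master prints the bare message, while a slave prefixes its rank, or writes to its per-rank output file when the application was started with -o/--ofile (the message text and function name are illustrative):

#include "G4MPImanager.hh"

// Sketch: rank-aware logging through the manager.
void LogInit()
{
  G4MPImanager::GetManager()->Print("geometry initialized\n");
}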

◆ GetEventsInMaster()

G4int G4MPImanager::GetEventsInMaster ( ) const
inline

Definition at line 113 of file G4MPImanager.hh.

113{return fevents_in_master;}

◆ GetEventsInSlave()

G4int G4MPImanager::GetEventsInSlave ( ) const
inline

Definition at line 114 of file G4MPImanager.hh.

114{return fevents_in_slave;}

◆ ShowHelp()

void G4MPImanager::ShowHelp ( ) const

Definition at line 667 of file G4MPImanager.cc.

668{
669 if (is_slave_ ) return;
670
671 G4cout << "Geant4 MPI interface" << G4endl;
672 G4cout << "usage:" << G4endl;
673 G4cout << "<app> [options] [macro file]"
674 << G4endl << G4endl;
675 G4cout << " -h, --help show this message."
676 << G4endl;
677 G4cout << " -v, --verbose show verbose message"
678 << G4endl;
679 G4cout << " -i, --init=FNAME set an init macro file"
680 << G4endl;
681 G4cout << " -o, --ofile[=FNAME] set slave output to a file"
682 << G4endl;
683 G4cout << G4endl;
684}

◆ GetComm()

const MPI::Intracomm * G4MPImanager::GetComm ( ) const
inline

Definition at line 119 of file G4MPImanager.hh.

119{ return &COMM_G4COMMAND_; }

◆ GetProcessingComm()

const MPI_Comm * G4MPImanager::GetProcessingComm ( ) const
inline

Definition at line 120 of file G4MPImanager.hh.

120{ return &processing_comm_; }

◆ GetCollectingComm()

const MPI_Comm * G4MPImanager::GetCollectingComm ( ) const
inline

Definition at line 121 of file G4MPImanager.hh.

121{ return &collecting_comm_; }

◆ GetAllComm()

const MPI_Comm * G4MPImanager::GetAllComm ( ) const
inline

Definition at line 122 of file G4MPImanager.hh.

122{ return &all_comm_; }

◆ DISALLOW_COPY_AND_ASSIGN()

G4MPImanager::DISALLOW_COPY_AND_ASSIGN ( G4MPImanager  )
private

◆ Initialize()

void G4MPImanager::Initialize ( )
private

Definition at line 148 of file G4MPImanager.cc.

149{
150 // G4cout << "G4MPImanager::Initialize" << G4endl;
151
152 if ( g4mpi_ != NULL ) {
153 G4Exception("G4MPImanager::Initialize()", "MPI002",
154 FatalException, "G4MPImanager is already instantiated.");
155 }
156
157 g4mpi_ = this;
158
159 // get rank information
160 world_size_ = MPI::COMM_WORLD.Get_size();
161 if ( world_size_ - nof_extra_workers_ <= 0 ) {
162 G4Exception("G4MPImanager::SetExtraWorker()", "MPI001",
163 JustWarning, "Cannot reserve extra ranks: the MPI size is not sufficient.");
164 nof_extra_workers_ = 0;
165 }
166 size_ = world_size_ - nof_extra_workers_;
167 rank_ = MPI::COMM_WORLD.Get_rank();
168 is_master_ = (rank_ == kRANK_MASTER);
169 is_slave_ = (rank_ != kRANK_MASTER);
170 is_extra_worker_ = false;
171
172 if ( nof_extra_workers_ ) {
173 // G4cout << "Extra workers requested" << G4endl;
174
175 // Define three groups of workers: processing, collecting and all;
176 // if no extra workers are declared, all world ranks are processing ranks
177
178 // MPI_Group world_group;
179 MPI_Comm_group(MPI_COMM_WORLD, &world_group_);
180
181 // Group 1 - processing ranks
182 int* ranks1 = new int[size_];
183 for (int i=0; i<size_; i++) ranks1[i] = i;
184 // Construct a group containing all of the processing ranks in world_group
185 MPI_Group_incl(world_group_, size_, ranks1, &processing_group_);
186
187 // Group 2 - collecting ranks
188 int* ranks2 = new int[nof_extra_workers_];
189 for (int i=0; i<nof_extra_workers_; i++) ranks2[i] = (world_size_ - nof_extra_workers_) + i;
190 // Construct a group containing all of the collecting ranks in world_group
191 MPI_Group_incl(world_group_, nof_extra_workers_, ranks2, &collecting_group_);
192
193 // Group 3 - all ranks
194 int* ranks3 = new int[world_size_];
195 for (int i=0; i<world_size_; i++) ranks3[i] = i;
196 // Construct a group containing all of the processing ranks in world_group
197 MPI_Group_incl(world_group_, world_size_, ranks3, &all_group_);
198
199 // Create new communicators based on the groups
200 MPI_Comm_create_group(MPI_COMM_WORLD, processing_group_, 0, &processing_comm_);
201 MPI_Comm_create_group(MPI_COMM_WORLD, collecting_group_, 0, &collecting_comm_);
202 MPI_Comm_create_group(MPI_COMM_WORLD, all_group_, 0, &all_comm_);
203
204 // COMM_G4COMMAND_ = processing_comm_ copy
205 COMM_G4COMMAND_ = MPI::Intracomm(processing_comm_);
206
207 } else {
208 // G4cout << "No extra workers requested" << G4endl;
209 // initialize MPI communicator
210 COMM_G4COMMAND_ = MPI::COMM_WORLD.Dup();
211 }
212
213 is_extra_worker_ = (collecting_comm_ != MPI_COMM_NULL);
214
215 // new G4MPI stuffs
216 messenger_ = new G4MPImessenger();
217 messenger_-> SetTargetObject(this);
218 session_ = new G4MPIsession;
219 status_ = new G4MPIstatus;
220
221 if ( ! is_extra_worker_ ) {
222 // default seed generator is random generator.
223 seed_generator_ = new G4MPIrandomSeedGenerator;
224 DistributeSeeds();
225 }
226
227 // print status of this worker
228 // G4cout << this << " world_size_ " << world_size_ << G4endl;
229 // G4cout << this << " size_ " << size_ << G4endl;
230 // G4cout << this << " nof_extra_workers_ " << nof_extra_workers_ << G4endl;
231 // G4cout << this << " is_master_ " << is_master_ << G4endl;
232 // G4cout << this << " is_slave_ " << is_slave_ << G4endl;
233 // G4cout << this << " is_extra_worker_ " << is_extra_worker_ << G4endl;
234 // G4cout << this << " is_processing_worker_ "
235 // << (processing_comm_ != MPI_COMM_NULL) << G4endl;
236}
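
To summarize the rank partitioning above, an illustrative layout with assumed values (world_size_ = 6, nof_extra_workers_ = 2):

// Illustrative layout for world_size_ = 6, nof_extra_workers_ = 2:
//   size_ = 4         -> processing ranks {0,1,2,3}; rank 0 is the master
//   collecting group  -> ranks {4,5}, which become the extra workers
//   processing_comm_  -> valid only on ranks 0-3 (COMM_G4COMMAND_ wraps it)
//   collecting_comm_  -> valid only on ranks 4-5; its non-NULL status is what
//                        sets is_extra_worker_ = true on those ranks
//   all_comm_         -> spans all 6 ranks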

◆ ParseArguments()

void G4MPImanager::ParseArguments ( G4int  argc,
char **  argv 
)
private

Definition at line 239 of file G4MPImanager.cc.

240{
241 G4int qhelp = 0;
242 G4String ofprefix = "mpi";
243
244 G4int c;
245 while ( 1 ) {
246 G4int option_index = 0;
247 static struct option long_options[] = {
248 {"help", no_argument, NULL, 'h'},
249 {"verbose", no_argument, NULL, 'v'},
250 {"init", required_argument, NULL, 'i'},
251 {"ofile", optional_argument, NULL, 'o'},
252 {NULL, 0, NULL, 0}
253 };
254
255 opterr = 0; // suppress message
256 c = getopt_long(argc, argv, "hvi:o", long_options, &option_index);
257 opterr = 1;
258
259 if( c == -1 ) break;
260
261 switch (c) {
262 case 'h' :
263 qhelp = 1;
264 break;
265 case 'v' :
266 verbose_ = 1;
267 break;
268 case 'i' :
269 qinitmacro_ = true;
270 init_file_name_ = optarg;
271 break;
272 case 'o' :
273 qfcout_ = true;
274 if ( optarg ) ofprefix = optarg;
275 break;
276 default:
277 G4cerr << "*** invalid options specified." << G4endl;
278 std::exit(EXIT_FAILURE);
279 break;
280 }
281 }
282
283 // show help
284 if ( qhelp ) {
285 if ( is_master_ ) ShowHelp();
286 MPI::Finalize();
287 std::exit(EXIT_SUCCESS);
288 }
289
290 // file output
291 if( is_slave_ && qfcout_ ) {
292 G4String prefix = ofprefix + ".%03d" + ".cout";
293 char str[1024];
294 sprintf(str, prefix.c_str(), rank_);
295 G4String fname(str);
296 fscout_.open(fname.c_str(), std::ios::out);
297 }
298
299 // non-option ARGV-elements ...
300 if ( optind < argc ) {
301 qbatchmode_ = true;
302 macro_file_name_ = argv[optind];
303 }
304}

◆ UpdateStatus()

void G4MPImanager::UpdateStatus ( )
private

Definition at line 307 of file G4MPImanager.cc.

308{
309 G4RunManager* runManager = G4RunManager::GetRunManager();
310 const G4Run* run = runManager-> GetCurrentRun();
311
312 G4int runid, eventid, neventTBP;
313
314 G4StateManager* stateManager = G4StateManager::GetStateManager();
315 G4ApplicationState g4state = stateManager-> GetCurrentState();
316
317 if ( run ) {
318 runid = run-> GetRunID();
319 neventTBP = run -> GetNumberOfEventToBeProcessed();
320 eventid = run-> GetNumberOfEvent();
321 if( g4state == G4State_GeomClosed || g4state == G4State_EventProc ) {
322 status_-> StopTimer();
323 }
324 } else {
325 runid = 0;
326 eventid = 0;
327 neventTBP = 0;
328 }
329
330 status_-> SetStatus(rank_, runid, neventTBP, eventid, g4state);
331}

Member Data Documentation

◆ g4mpi_

G4MPImanager * G4MPImanager::g4mpi_ = NULL
staticprivate

Definition at line 131 of file G4MPImanager.hh.

◆ messenger_

G4MPImessenger* G4MPImanager::messenger_
private

Definition at line 132 of file G4MPImanager.hh.

◆ session_

G4MPIsession* G4MPImanager::session_
private

Definition at line 133 of file G4MPImanager.hh.

◆ extra_worker_

G4VMPIextraWorker* G4MPImanager::extra_worker_
private

Definition at line 134 of file G4MPImanager.hh.

◆ seed_generator_

G4VMPIseedGenerator* G4MPImanager::seed_generator_
private

Definition at line 137 of file G4MPImanager.hh.

◆ status_

G4MPIstatus* G4MPImanager::status_
private

Definition at line 139 of file G4MPImanager.hh.

◆ verbose_

G4int G4MPImanager::verbose_
private

Definition at line 141 of file G4MPImanager.hh.

◆ is_master_

G4bool G4MPImanager::is_master_
private

Definition at line 144 of file G4MPImanager.hh.

◆ is_slave_

G4bool G4MPImanager::is_slave_
private

Definition at line 145 of file G4MPImanager.hh.

◆ is_extra_worker_

G4bool G4MPImanager::is_extra_worker_
private

Definition at line 146 of file G4MPImanager.hh.

◆ rank_

G4int G4MPImanager::rank_
private

Definition at line 147 of file G4MPImanager.hh.

◆ size_

G4int G4MPImanager::size_
private

Definition at line 148 of file G4MPImanager.hh.

◆ world_size_

G4int G4MPImanager::world_size_
private

Definition at line 149 of file G4MPImanager.hh.

◆ COMM_G4COMMAND_

MPI::Intracomm G4MPImanager::COMM_G4COMMAND_
private

Definition at line 152 of file G4MPImanager.hh.

◆ processing_comm_

MPI_Comm G4MPImanager::processing_comm_
private

Definition at line 154 of file G4MPImanager.hh.

◆ collecting_comm_

MPI_Comm G4MPImanager::collecting_comm_
private

Definition at line 156 of file G4MPImanager.hh.

◆ all_comm_

MPI_Comm G4MPImanager::all_comm_
private

Definition at line 158 of file G4MPImanager.hh.

◆ world_group_

MPI_Group G4MPImanager::world_group_
private

Definition at line 160 of file G4MPImanager.hh.

◆ processing_group_

MPI_Group G4MPImanager::processing_group_
private

Definition at line 161 of file G4MPImanager.hh.

◆ collecting_group_

MPI_Group G4MPImanager::collecting_group_
private

Definition at line 162 of file G4MPImanager.hh.

◆ all_group_

MPI_Group G4MPImanager::all_group_
private

Definition at line 163 of file G4MPImanager.hh.

◆ qfcout_

G4bool G4MPImanager::qfcout_
private

Definition at line 166 of file G4MPImanager.hh.

◆ fscout_

std::ofstream G4MPImanager::fscout_
private

Definition at line 167 of file G4MPImanager.hh.

◆ qinitmacro_

G4bool G4MPImanager::qinitmacro_
private

Definition at line 170 of file G4MPImanager.hh.

◆ init_file_name_

G4String G4MPImanager::init_file_name_
private

Definition at line 171 of file G4MPImanager.hh.

◆ qbatchmode_

G4bool G4MPImanager::qbatchmode_
private

Definition at line 172 of file G4MPImanager.hh.

◆ macro_file_name_

G4String G4MPImanager::macro_file_name_
private

Definition at line 173 of file G4MPImanager.hh.

◆ thread_id_

pthread_t G4MPImanager::thread_id_
private

Definition at line 176 of file G4MPImanager.hh.

◆ fevents_in_master

G4int G4MPImanager::fevents_in_master = 0
private

Definition at line 177 of file G4MPImanager.hh.

◆ fevents_in_slave

G4int G4MPImanager::fevents_in_slave = 0
private

Definition at line 178 of file G4MPImanager.hh.

◆ master_weight_

G4double G4MPImanager::master_weight_
private

Definition at line 181 of file G4MPImanager.hh.

◆ nof_extra_workers_

G4int G4MPImanager::nof_extra_workers_
private

Definition at line 182 of file G4MPImanager.hh.


The documentation for this class was generated from the following files:

G4MPImanager.hh
G4MPImanager.cc