#include "G4RunManager.hh"
#include "G4StateManager.hh"
#include "G4UIcommand.hh"
#include "G4UImanager.hh"
// wrapper to run a UI command on a separate thread
void thread_ExecuteThreadCommand(const G4String* command)

// helper used while polling MPI requests: sleep for "ausec" microseconds
void Wait(G4int ausec)
{
  struct timespec treq, trem;
  treq.tv_sec = 0;
  treq.tv_nsec = ausec*1000;
  nanosleep(&treq, &trem);
}
G4MPImanager::G4MPImanager(int nof_extra_workers)
  : COMM_G4COMMAND_(MPI_COMM_NULL), processing_comm_(MPI_COMM_NULL),
    collecting_comm_(MPI_COMM_NULL), all_comm_(MPI_COMM_NULL),
    qfcout_(false), qinitmacro_(false), qbatchmode_(false),
    thread_id_(0), master_weight_(1.), nof_extra_workers_(nof_extra_workers)
{
  MPI::Init_thread(MPI::THREAD_SERIALIZED);
}

G4MPImanager::G4MPImanager(int argc, char** argv, int nof_extra_workers)
  : COMM_G4COMMAND_(MPI_COMM_NULL), processing_comm_(MPI_COMM_NULL),
    collecting_comm_(MPI_COMM_NULL), all_comm_(MPI_COMM_NULL),
    qfcout_(false), qinitmacro_(false), qbatchmode_(false),
    thread_id_(0), master_weight_(1.), nof_extra_workers_(nof_extra_workers)
{
  MPI::Init_thread(argc, argv, MPI::THREAD_SERIALIZED);
}
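
// A usage sketch, not part of this source file: how an application might
// create the manager in main(). Assumes the usual Geant4 setup (run manager,
// detector, physics list) which is only hinted at below; the variable names
// are illustrative.
//
//   #include "G4MPImanager.hh"
//   #include "G4MPIsession.hh"
//
//   int main(int argc, char** argv)
//   {
//     G4MPImanager* g4MPI = new G4MPImanager(argc, argv);
//     G4MPIsession* session = g4MPI-> GetMPIsession();
//     // ... construct run manager, detector, physics list, user actions ...
//     session-> SessionStart();   // batch or interactive session on every rank
//     delete g4MPI;               // finalizes MPI
//     return 0;
//   }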
  G4Exception("G4MPImanager::GetManager()", "MPI001",
              FatalException, "G4MPImanager is not instantiated.");

  G4Exception("G4MPImanager::SetExtraWorker()", "MPI001",
              FatalException, "Number of extra workers >0 must be set first.");

  G4Exception("G4MPImanager::Initialize()", "MPI002",
              FatalException, "G4MPImanager is already instantiated.");

  G4Exception("G4MPImanager::SetExtraWorker()", "MPI001",
              JustWarning, "Cannot reserve extra ranks: the MPI size is not sufficient.");
  rank_ = MPI::COMM_WORLD.Get_rank();

  int* ranks1 = new int[size_];
  for ( int i = 0; i < size_; i++ ) ranks1[i] = i;
    G4int option_index = 0;
    static struct option long_options[] = {
      {"help",    no_argument,       NULL, 'h'},
      {"verbose", no_argument,       NULL, 'v'},
      {"init",    required_argument, NULL, 'i'},
      {"ofile",   optional_argument, NULL, 'o'},
      {NULL,      0,                 NULL,  0 }
    };

    c = getopt_long(argc, argv, "hvi:o", long_options, &option_index);
      // case 'o': redirect slave output to per-rank files
      if ( optarg ) ofprefix = optarg;

      // unknown option
      G4cerr << "*** invalid options specified." << G4endl;
      std::exit(EXIT_FAILURE);

  // help was requested: show it and quit
      std::exit(EXIT_SUCCESS);

  // open the per-rank output file, e.g. "<prefix>.000.cout"
  G4String prefix = ofprefix + ".%03d" + ".cout";
  char str[1024];
  sprintf(str, prefix.c_str(), rank_);
  G4String fname(str);
  fscout_.open(fname.c_str(), std::ios::out);

  // a remaining non-option argument is taken as a batch macro file
  if ( optind < argc ) {
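
// Example invocation (a sketch; the executable name "exMPI" is a placeholder):
//
//   mpiexec -n 4 ./exMPI -v -i init.mac -o run.mac
//
// parses as: verbose output on, init macro "init.mac", per-rank cout
// redirected to "mpi.000.cout", "mpi.001.cout", ... (default prefix "mpi"),
// and the trailing non-option argument "run.mac" taken as the batch macro.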
  G4RunManager* runManager = G4RunManager::GetRunManager();
  const G4Run* run = runManager-> GetCurrentRun();

  G4int runid, eventid, neventTBP;

  G4StateManager* stateManager = G4StateManager::GetStateManager();
  G4ApplicationState g4state = stateManager-> GetCurrentState();

  runid = run-> GetRunID();
  neventTBP = run-> GetNumberOfEventToBeProcessed();
  eventid = run-> GetNumberOfEvent();
  if ( g4state == G4State_GeomClosed || g4state == G4State_EventProc ) {

  status_-> SetStatus(rank_, runid, neventTBP, eventid, g4state);
    G4int nev = status_-> GetEventID();
    G4int nevtp = status_-> GetNEventToBeProcessed();
    G4double cputime = status_-> GetCPUTime();

    // receive each slave's status (Recv/UnPack elided) and accumulate
    for ( G4int islave = 1; islave < size_; islave++ ) {
      nevtp += status_-> GetNEventToBeProcessed();
      cputime += status_-> GetCPUTime();
    }

    G4cout << "-------------------------------------------------------"
           << G4endl
           << "* #ranks= " << size_
           << " event= " << nev << "/" << nevtp
           << " state= " << strStatus
           << " time= " << cputime << "s"
           << G4endl;
  // DistributeSeeds(): apply the pre-generated seed for this rank
  G4Random::setTheSeed(seed_list[rank_]);

  // ShowSeeds(): the master prints its own seed ...
  G4cout << "* rank= " << rank_
         << " seed= " << G4Random::getTheSeed()
         << G4endl;
  // ... then the seed received from each slave (Recv into buff elided)
  for ( G4int islave = 1; islave < size_; islave++ ) {
    G4cout << "* rank= " << islave
           << " seed= " << buff << G4endl;
  }
  // slaves send their own seed to the master
  buff = G4Random::getTheSeed();

  // SetSeed(): force a given seed on the specified rank
  if ( rank_ == inode ) {
    CLHEP::HepRandom::setTheSeed(seed);
  }
  // CheckThreadStatus(): ask every slave whether a beamOn thread is running
  unsigned qstatus = 0;

  for ( G4int islave = 1; islave < size_; islave++ ) {
    // non-blocking receive of the slave status (Irecv elided), then poll
    while ( ! request.Test() ) {
      ::Wait(1000);
    }
  }

  if ( qstatus != 0 ) return true;
  G4int rc = UI-> ApplyCommand(command);
  G4int commandStatus = rc - (rc%100);

  switch ( commandStatus ) {
  case fCommandSucceeded:
    break;
  case fIllegalApplicationState:
    G4cerr << "illegal application state -- command refused" << G4endl;
    break;
  default:
    G4cerr << "command refused (" << commandStatus << ")" << G4endl;
  }
    G4cout << "G4MPIsession:: beamOn is still running." << G4endl;

    // spawn a thread that applies the beamOn command
    G4int rc = pthread_create(&thread_id_, 0,
                              (Func_t)thread_ExecuteThreadCommand,
                              (void*)&cmdstr);
    if ( rc != 0 )
      G4Exception("G4MPImanager::ExecuteBeamOnThread()",
                  "MPI003", FatalException,
                  "Failed to create a beamOn thread.");
  // BcastCommand(): copy the command string into a fixed-size buffer
  enum { kBUFF_SIZE = 512 };
  static char sbuff[kBUFF_SIZE];
  command.copy(sbuff, kBUFF_SIZE);
  G4int len = command.size();
  sbuff[len] = '\0';   // no boundary check

  // the master sends the buffer to every slave ...
  for ( G4int islave = 1; islave < size_; islave++ ) {

  // ... while each slave posts a non-blocking receive and polls it
  MPI::Request request = COMM_G4COMMAND_.Irecv(sbuff, kBUFF_SIZE, MPI::CHAR,
                                               kRANK_MASTER, kTAG_G4COMMAND);
  while ( ! request.Test() ) {

  // ExecuteMacroFile(): run the macro through a batch session
  batchSession-> SessionStart();
#ifndef G4MULTITHREADED
  G4RunManager* runManager = G4RunManager::GetRunManager();
#endif

  // divide the events among the processing ranks
  G4double ntot = master_weight_ + size_ - 1.;
  G4int nproc = G4int(nevent/ntot);
  G4int nproc0 = nevent - nproc*(size_ - 1);

  G4cout << "#events in master=" << nproc0 << " / "
         << "#events in slave=" << nproc << G4endl;
#ifdef G4MULTITHREADED
  G4String str_nevt;
  if ( is_master_ ) str_nevt = G4UIcommand::ConvertToString(nproc0);
  else str_nevt = G4UIcommand::ConvertToString(nproc);
  UI-> ApplyCommand("/run/beamOn " + str_nevt);
#else
  if ( is_master_ ) runManager-> BeamOn(nproc0);
  else runManager-> BeamOn(nproc);
#endif

  // qdivide == false: every rank processes the same number of events
  G4cout << "#events in master=" << nevent << " / "
         << "#events in slave=" << nevent << G4endl;

#ifdef G4MULTITHREADED
  G4String str_nevt = G4UIcommand::ConvertToString(nevent);
  UI-> ApplyCommand("/run/beamOn " + str_nevt);
#else
  runManager-> BeamOn(nevent);
#endif
  // run the extra worker if one was registered
  G4cout << "Calling extra_worker " << G4endl;

  G4cout << " !!!! extra_worker_ is not defined " << G4endl;

  // the master polls each slave until its run has finished
  for ( G4int islave = 1; islave < size_; islave++ ) {
    while ( ! request.Test() ) {
      ::Wait(1000);
    }
  }

  // Print(): on the master, write the message directly to stdout
  std::cout << message << std::flush;
  G4cout << "Geant4 MPI interface" << G4endl;
  G4cout << "usage:" << G4endl;
  G4cout << "<app> [options] [macro file]" << G4endl;
  G4cout << "   -h, --help           show this message." << G4endl;
  G4cout << "   -v, --verbose        show verbose message" << G4endl;
  G4cout << "   -i, --init=FNAME     set an init macro file" << G4endl;
  G4cout << "   -o, --ofile[=FNAME]  set slave output to a file" << G4endl;