#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <error.h>
#include "mpi.h"

#ifdef BUILD_MANAGER

void choose_worker_program( char *s, size_t slen )
{
    strncpy(s, "./worker", slen);
    s[slen - 1] = '\0';
}

int main(int argc, char *argv[])
{
    int world_size, universe_size, *universe_sizep, flag, ball, rc;
    MPI_Comm everyone;            /* intercommunicator */
    char worker_program[100];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    if (world_size != 1)
        error(1, 0, "Top heavy with management");

    MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_UNIVERSE_SIZE,
                      &universe_sizep, &flag);
    if (!flag) {
        printf("This MPI does not support UNIVERSE_SIZE. How many\nprocesses total? ");
        scanf("%d", &universe_size);
    } else
        universe_size = *universe_sizep;

    if (universe_size == 1)
        error(1, 0, "No room to start workers");

    /*
     * Now spawn the workers. Note that there is a run-time determination
     * of what type of worker to spawn, and presumably this calculation
     * must be done at run time and cannot be done before starting the
     * program. If everything is known when the application is first
     * started, it is generally better to start all the processes at once
     * in a single MPI_COMM_WORLD.
     */
    int num_workers = universe_size - world_size;
    /* MPI_Comm_spawn expects one error code per spawned process, so an
     * array of size num_workers is required (a single int would overflow). */
    int *spawn_errors = malloc(num_workers * sizeof(int));

    choose_worker_program(worker_program, sizeof(worker_program));
    rc = MPI_Comm_spawn(worker_program, MPI_ARGV_NULL, num_workers,
                        MPI_INFO_NULL, 0, MPI_COMM_WORLD, &everyone,
                        spawn_errors);
    printf("%s: MPI_Comm_spawn() = %d (%d)\n", argv[0], rc, spawn_errors[0]);
    fflush(stdout);
    free(spawn_errors);

    /*
     * Parallel code here. The communicator "everyone" can be used
     * to communicate with the spawned processes, which have ranks
     * 0 .. (universe_size - world_size - 1) in the remote group of
     * the intercommunicator "everyone".
     */
    srandom(time(NULL) ^ getpid());
    ball = random();
    printf("%s: Broadcasting ball = %08X\n", argv[0], ball);
    MPI_Bcast(&ball, 1, MPI_INT, MPI_ROOT, everyone);

    MPI_Barrier(everyone);
    MPI_Finalize();
    return 0;
}

#else

int main(int argc, char *argv[], char *env[])
{
    int rank = 0, size = 0, parentSize = 0, ball = 0xCAFEBABE, rc;
    MPI_Comm parent = MPI_COMM_NULL;

    /* Debug aid: dump the environment the worker inherited.
    char **p = env;
    while ( *p ) printf("%s\n", *p++); */

    /* Workaround: clear any PSM2 device list inherited from the manager
     * so the spawned worker can initialize its own PSM2 endpoint. */
    unsetenv("PSM2_DEVICES");

    rc = MPI_Init(&argc, &argv);
    printf("%s: MPI_Init() called (%d)\n", argv[0], rc);
    fflush(stdout);

    rc = MPI_Comm_get_parent(&parent);
    if (parent == MPI_COMM_NULL)
        error(1, 0, "No parent!");
    printf("%s: MPI_Comm_get_parent() = %p (%d)\n",
           argv[0], (void*)parent, rc);
    fflush(stdout);

    rc = MPI_Comm_size(MPI_COMM_WORLD, &size);
    printf("%s: MPI_Comm_size() = %d (%d)\n", argv[0], size, rc);

    rc = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    printf("%s: MPI_Comm_rank() = %d (%d)\n", argv[0], rank, rc);

    rc = MPI_Comm_remote_size(parent, &parentSize);
    if (parentSize != 1)
        error(1, 0, "Something's wrong with the parent");
    printf("%s: MPI_Comm_remote_size() = %d (%d)\n", argv[0], parentSize, rc);

    /*
     * Parallel code here.
     * The manager is represented as the process with rank 0 in (the
     * remote group of) the parent intercommunicator. If the workers need
     * to communicate among themselves, they can use MPI_COMM_WORLD.
     */
    rc = MPI_Bcast(&ball, 1, MPI_INT, 0, parent);
    printf("%s: %d/%d[%d] : ball = %08X (%d)\n",
           argv[0], rank, size, parentSize, ball, rc);

    MPI_Barrier(parent);
    MPI_Finalize();
    return 0;
}

#endif
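
/*
 * Build/run sketch, assuming an mpicc/mpirun toolchain (Open MPI or
 * MPICH-style); the file name "spawn_example.c" is illustrative, not
 * from the original:
 *
 *   mpicc -DBUILD_MANAGER spawn_example.c -o manager
 *   mpicc                 spawn_example.c -o worker
 *   mpirun -np 1 ./manager
 *
 * The manager must be launched as a single process (it checks that
 * world_size == 1), and "./worker" must exist in its working directory
 * because choose_worker_program() hard-codes that path. The launcher
 * needs enough slots for MPI_UNIVERSE_SIZE to exceed 1, or spawning
 * the workers will fail.
 */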