Changeset 2a5451
- Timestamp:
- May 11, 2012, 8:36:18 PM (14 years ago)
- Children:
- 2d4211
- Parents:
- 36d56c
- Location:
- src
- Files: 7 edited
base/command_list.cpp (modified) (2 diffs)
-
base/timer.cpp (modified) (3 diffs)
-
comm/comm.hpp (modified) (1 diff)
-
comm/comm_mpi.cpp (modified) (3 diffs)
-
comm/comm_mpi.hpp (modified) (1 diff)
-
mg.cpp (modified) (2 diffs)
-
units/particle/interface_fcs.cpp (modified) (2 diffs)
Legend:
- Unmodified
- Added
- Removed
src/base/command_list.cpp
r36d56c r2a5451 62 62 63 63 #ifdef DEBUG_BARRIER 64 Comm& comm = *MG::GetComm(); 64 65 #ifdef HAVE_MPI 65 MPI_Barrier(MPI_COMM_WORLD);66 comm.Barrier(); 66 67 #endif 67 MG::GetComm()->PrintStringOnce("Command \"%s\" start", iter->first.c_str());68 comm.PrintStringOnce("Command \"%s\" start", iter->first.c_str()); 68 69 #endif 69 70 … … 74 75 #ifdef DEBUG_BARRIER 75 76 #ifdef HAVE_MPI 76 MPI_Barrier(MPI_COMM_WORLD);77 comm.Barrier(); 77 78 #endif 78 MG::GetComm()->PrintStringOnce("Command \"%s\" done", iter->first.c_str());79 comm.PrintStringOnce("Command \"%s\" done", iter->first.c_str()); 79 80 #endif 80 81 -
src/base/timer.cpp
r36d56c r2a5451 185 185 Comm& comm = *MG::GetComm(); 186 186 char name[80]; 187 int rank, size; 188 189 MPI_Comm_rank(MPI_COMM_WORLD, &rank); 190 MPI_Comm_size(MPI_COMM_WORLD, &size); 187 188 int rank = comm.GlobalRank(); 189 int size = comm.GlobalSize(); 191 190 192 191 vmg_float times[size]; … … 196 195 197 196 int timer_size = Timer::td.size(); 198 MPI_Bcast(timer_size, 1, MPI_INT, 0, MPI_COMM_WORLD);197 comm.GlobalBroadcast(timer_size); 199 198 200 199 if (rank == 0) { 201 200 for (iter=Timer::td.begin(); iter!=Timer::td.end(); ++iter) { 202 201 std::strcpy(name, iter->first.c_str()); 203 MPI_Bcast(name, 80, MPI_CHAR, 0, MPI_COMM_WORLD);204 MPI_Gather(&Timer::td[name].duration, 1, MPI_DOUBLE, times, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);205 MPI_Gather(&Timer::td[name].total, 1, MPI_INT, calls, 1, MPI_INT, 0, MPI_COMM_WORLD);202 comm.GlobalBroadcast(name); 203 comm.GlobalGather(Timer::td[name].duration, times); 204 comm.GlobalGather(Timer::td[name].total, calls); 206 205 207 206 int min_calls, max_calls; … … 227 226 }else { 228 227 for (int i=0; i<timer_size; ++i) { 229 MPI_Bcast(name, 80, MPI_CHAR, 0, MPI_COMM_WORLD);230 MPI_Gather(&Timer::td[name].duration, 1, MPI_DOUBLE, times, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);231 MPI_Gather(&Timer::td[name].total, 1, MPI_INT, calls, 1, MPI_INT, 0, MPI_COMM_WORLD);228 comm.GlobalBroadcast(name); 229 comm.GlobalGather(Timer::td[name].duration, times); 230 comm.GlobalGather(Timer::td[name].total, calls); 232 231 } 233 232 } -
src/comm/comm.hpp
r36d56c r2a5451 71 71 virtual void CommFromGhostsAsyncFinish(Grid& grid) = 0; 72 72 73 virtual void Barrier() {} 74 73 75 virtual vmg_float GlobalSum(const vmg_float& value) {return value;} 74 76 virtual vmg_float GlobalSumRoot(const vmg_float& value) {return value;} 75 77 virtual void GlobalSumArray(vmg_float* array, const vmg_int& size) {} 78 virtual vmg_float GlobalMax(const vmg_float& value) {return value;} 79 virtual vmg_float GlobalMaxRoot(const vmg_float& value) {return value;} 80 virtual void GlobalMaxArray(vmg_float* array, const vmg_int& size) {} 81 virtual void GlobalBroadcast(vmg_float& value) {} 82 virtual void GlobalGather(vmg_float& value, vmg_float* array) {array[0] = value;} 76 83 77 84 virtual vmg_int GlobalSum(const vmg_int& value) {return value;} 78 85 virtual vmg_int GlobalSumRoot(const vmg_int& value) {return value;} 79 86 virtual void GlobalSumArray(vmg_int* array, const vmg_int& size) {} 80 81 virtual vmg_float GlobalMax(const vmg_float& value) {return value;}82 virtual vmg_float GlobalMaxRoot(const vmg_float& value) {return value;}83 virtual void GlobalMaxArray(vmg_float* array, const vmg_int& size) {}84 85 87 virtual vmg_int GlobalMax(const vmg_int& value) {return value;} 86 88 virtual vmg_int GlobalMaxRoot(const vmg_int& value) {return value;} 87 89 virtual void GlobalMaxArray(vmg_int* array, const vmg_int& size) {} 90 virtual void GlobalBroadcast(vmg_int& value) {} 91 virtual void GlobalGather(vmg_int& value, vmg_int* array) {array[0] = value;} 92 93 virtual void GlobalBroadcast(char* str) {} 88 94 89 95 virtual vmg_float LevelSum(const Grid& grid, const vmg_float& value) {return value;} -
src/comm/comm_mpi.cpp
r36d56c r2a5451 186 186 } 187 187 188 void CommMPI::Barrier() 189 { 190 MPI_Barrier(comm_global); 191 } 192 188 193 vmg_float CommMPI::GlobalSum(const vmg_float& value) 189 194 { … … 206 211 } 207 212 213 void CommMPI::GlobalBroadcast(vmg_float& value) 214 { 215 MPI_Bcast(&value, 1, MPI_DOUBLE, 0, comm_global); 216 } 217 218 void CommMPI::GlobalGather(vmg_float& value, vmg_float* array) 219 { 220 MPI_Gather(&value, 1, MPI_DOUBLE, array, 1, MPI_DOUBLE, 0, comm_global); 221 } 222 208 223 vmg_int CommMPI::GlobalSum(const vmg_int& value) 209 224 { … … 264 279 { 265 280 MPI_Allreduce(MPI_IN_PLACE, array, size, MPI_INT, MPI_MAX, comm_global); 281 } 282 283 void CommMPI::GlobalBroadcast(vmg_int& value) 284 { 285 MPI_Bcast(&value, 1, MPI_INT, 0, comm_global); 286 } 287 288 void CommMPI::GlobalGather(vmg_int& value, vmg_int* array) 289 { 290 MPI_Gather(&value, 1, MPI_INT, array, 1, MPI_INT, 0, comm_global); 291 } 292 293 void CommMPI::GlobalBroadcast(char* str) 294 { 295 MPI_Bcast(str, std::strlen(str)+1, MPI_CHAR, 0, comm_global); 266 296 } 267 297 -
src/comm/comm_mpi.hpp
r36d56c r2a5451 87 87 void CommFromGhostsAsyncFinish(Grid& grid); 88 88 89 void Barrier(); 90 89 91 vmg_float GlobalSum(const vmg_float& value); 90 92 vmg_float GlobalSumRoot(const vmg_float& value); 91 93 void GlobalSumArray(vmg_float* array, const vmg_int& size); 94 vmg_float GlobalMax(const vmg_float& value); 95 vmg_float GlobalMaxRoot(const vmg_float& value); 96 void GlobalMaxArray(vmg_float* array, const vmg_int& size); 97 void GlobalBroadcast(vmg_float& value); 98 void GlobalGather(vmg_float& value, vmg_float* array); 92 99 93 100 vmg_int GlobalSum(const vmg_int& value); 94 101 vmg_int GlobalSumRoot(const vmg_int& value); 95 102 void GlobalSumArray(vmg_int* array, const vmg_int& size); 96 97 vmg_float GlobalMax(const vmg_float& value);98 vmg_float GlobalMaxRoot(const vmg_float& value);99 void GlobalMaxArray(vmg_float* array, const vmg_int& size);100 101 103 vmg_int GlobalMax(const vmg_int& value); 102 104 vmg_int GlobalMaxRoot(const vmg_int& value); 103 105 void GlobalMaxArray(vmg_int* array, const vmg_int& size); 106 void GlobalBroadcast(vmg_int& value); 107 void GlobalGather(vmg_int& value, vmg_int* array); 108 109 void GlobalBroadcast(char* str); 104 110 105 111 vmg_float LevelSum(const Grid& grid, const vmg_float& value); -
src/mg.cpp
r36d56c r2a5451 158 158 #ifdef DEBUG_MEASURE_TIME 159 159 #ifdef HAVE_MPI 160 MPI_Barrier(MPI_COMM_WORLD);160 GetComm()->Barrier(); 161 161 #endif 162 162 Timer::Start("CompleteRunningTime"); … … 175 175 #ifdef DEBUG_MEASURE_TIME 176 176 #ifdef HAVE_MPI 177 MPI_Barrier(MPI_COMM_WORLD);177 GetComm()->Barrier(); 178 178 #endif 179 179 Timer::Stop("CompleteRunningTime"); -
src/units/particle/interface_fcs.cpp
r36d56c r2a5451 107 107 MPI_Errhandler mpiErrorHandler; 108 108 MPI_Comm_create_errhandler(VMG::MPI::ConvertToException, &mpiErrorHandler); 109 MPI_Comm_set_errhandler( MPI_COMM_WORLD, mpiErrorHandler);109 MPI_Comm_set_errhandler(mpi_comm, mpiErrorHandler); 110 110 #endif 111 111 … … 119 119 * Register communication class. 120 120 */ 121 Comm* comm = new Particle::CommMPI(boundary, new DomainDecompositionMPI() );121 Comm* comm = new Particle::CommMPI(boundary, new DomainDecompositionMPI(), mpi_comm); 122 122 comm->Register("COMM"); 123 123
Note: See TracChangeset for help on using the changeset viewer.