- Timestamp: Apr 17, 2013, 6:35:40 PM
- Children: 4f3474
- Parents: d6a338
- Location: src/comm
- Files: 7 edited
  - comm.hpp (modified) (4 diffs)
  - comm_mpi.cpp (modified) (2 diffs)
  - comm_mpi.hpp (modified) (1 diff)
  - comm_serial.cpp (modified) (2 diffs)
  - comm_serial.hpp (modified) (1 diff)
  - mpi/settings.cpp (modified) (3 diffs)
  - mpi/settings.hpp (modified) (3 diffs)
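
No commit message survives in this view, but the diffs below tell a consistent story: the abstract `Comm` interface gains a `CommBoundaryValues()` operation; `CommMPI` implements it by summing the per-rank contributions to each boundary value with `MPI_Reduce_scatter`, while `CommSerial` returns the setter's values directly; and `VMG::MPI::Settings` gains the bookkeeping (`InitializeBoundaryValues()`, `BoundaryRanks()`) that maps each boundary value to the rank whose subdomain contains it.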
src/comm/comm.hpp

```diff
--- src/comm/comm.hpp (rd6a338)
+++ src/comm/comm.hpp (r177495)
@@ -30,5 +30,5 @@
 
 #include <map>
-#include <list>
+#include <vector>
 
 #include "base/defs.hpp"
@@ -41,4 +41,5 @@
 {
 
+class BoundaryValue;
 class DomainDecomposition;
 class GlobalIndices;
@@ -70,4 +71,5 @@
   virtual void CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction) = 0;
   virtual void CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction) = 0;
+  virtual std::vector<BoundaryValue> CommBoundaryValues() = 0;
 
   virtual void CommToGhostsAsyncStart(Grid& grid) = 0;
@@ -139,5 +141,4 @@
   const std::map<Index, std::vector<GlobalIndices> >& DecomposedGlobal() const {return decomposed_global;}
 
-
 protected:
   const std::string& OutputPath();
```
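
The new pure virtual returns the boundary values by `std::vector`, which is what motivates the `<list>`-to-`<vector>` include swap, and `BoundaryValue` only needs a forward declaration because the header never dereferences it. A hypothetical call site might look as follows (`ApplyBoundaryValue` is invented for illustration; `MG::GetComm()` does appear elsewhere in this changeset):

```cpp
// After the collective exchange each rank holds exactly the boundary
// values it owns, with all remote contributions already summed in.
std::vector<BoundaryValue> bvs = MG::GetComm()->CommBoundaryValues();
for (std::size_t i = 0; i < bvs.size(); ++i)
  ApplyBoundaryValue(bvs[i]);  // hypothetical consumer of the summed values
```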
src/comm/comm_mpi.cpp

```diff
--- src/comm/comm_mpi.cpp (rd6a338)
+++ src/comm/comm_mpi.cpp (r177495)
@@ -58,4 +58,5 @@
 #include "comm/comm_mpi.hpp"
 #include "comm/mpi/datatypes_local.hpp"
+#include "discretization/boundary_value_setter.hpp"
 #include "grid/grid.hpp"
 #include "grid/multigrid.hpp"
@@ -126,3 +127,43 @@
     AddBufferAll(grid_new, datatypes.Receive());
   }
+}
+
+std::vector<BoundaryValue> CommMPI::CommBoundaryValues()
+{
+  MPI_Comm comm = settings.CommunicatorLocal((*MG::GetRhs())(MG::GetRhs()->MaxLevel()));
+  if (comm != MPI_COMM_NULL) {
+
+    const std::vector<BoundaryValue>& bvs = MG::GetBoundaryValueSetter()->BoundaryValues();
+
+    std::vector<vmg_float> val_buffer; val_buffer.reserve(bvs.size());
+
+    int comm_size, comm_rank;
+    MPI_Comm_rank(comm, &comm_rank);
+    MPI_Comm_size(comm, &comm_size);
+
+    std::vector< std::vector<BoundaryValue> > bvs_distributed(comm_size);
+
+    for (unsigned int i=0; i<bvs.size(); ++i)
+      bvs_distributed[settings.BoundaryRanks()[i]].push_back(bvs[i]);
+
+    int bvs_count[comm_size];
+    for (int i=0; i<comm_size; ++i) {
+      bvs_count[i] = bvs_distributed[i].size();
+      for (unsigned int j=0; j<bvs_distributed[i].size(); ++j)
+        val_buffer.push_back(bvs_distributed[i][j].Val());
+    }
+
+    MPI_Reduce_scatter(MPI_IN_PLACE, &val_buffer.front(), bvs_count, MPI_DOUBLE, MPI_SUM, comm);
+
+    int begin = 0;
+    for (int i=0; i<comm_rank; ++i)
+      begin += bvs_distributed[i].size();
+
+    for (unsigned int i=0; i<bvs_distributed[comm_rank].size(); ++i)
+      bvs_distributed[comm_rank][i].Val() = val_buffer[begin+i];
+
+    return bvs_distributed[comm_rank];
+  }
+
+  return std::vector<BoundaryValue>();
 }
```
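
The pattern here: `Settings::BoundaryRanks()` assigns each boundary value to exactly one owner rank, every rank contributes a partial value for every boundary point, and `MPI_Reduce_scatter` sums the contribution vectors elementwise while scattering the result so each rank keeps only its own block; with `MPI_IN_PLACE`, the code relies on the summed result landing in the caller's own block of `val_buffer`, which is why it is read back at offset `begin`. Two asides: `int bvs_count[comm_size]` is a variable-length array, a compiler extension rather than standard C++, and the collective must be entered by every rank of `comm`. A minimal standalone sketch of the same collective, with invented values rather than VMG data:

```cpp
// Compile with an MPI wrapper, e.g. mpic++; run with mpirun -np 4 ./a.out
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // One "boundary value" owned by each rank; every rank contributes a
  // partial value (here simply rank + 1) for every one of them.
  std::vector<int> counts(size, 1);
  std::vector<double> contributions(size, static_cast<double>(rank + 1));
  double my_sum = 0.0;

  // Sum the contribution vectors elementwise across all ranks, then
  // scatter the result: rank i keeps only block i (counts[i] entries).
  MPI_Reduce_scatter(&contributions.front(), &my_sum, &counts.front(),
                     MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

  // Every block of the reduced vector equals 1 + 2 + ... + size.
  std::printf("rank %d owns the summed value %g\n", rank, my_sum);

  MPI_Finalize();
  return 0;
}
```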
src/comm/comm_mpi.hpp

```diff
--- src/comm/comm_mpi.hpp (rd6a338)
+++ src/comm/comm_mpi.hpp (r177495)
@@ -81,4 +81,5 @@
   void CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction);
   void CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction);
+  std::vector<BoundaryValue> CommBoundaryValues();
 
   void CommToGhostsAsyncStart(Grid& grid);
```
src/comm/comm_serial.cpp

```diff
--- src/comm/comm_serial.cpp (rd6a338)
+++ src/comm/comm_serial.cpp (r177495)
@@ -45,4 +45,5 @@
 #include "base/vector.hpp"
 #include "comm/comm_serial.hpp"
+#include "discretization/boundary_value_setter.hpp"
 #include "grid/multigrid.hpp"
 #include "grid/tempgrid.hpp"
@@ -79,3 +80,8 @@
   for (iter = grid_old.Iterators().CompleteGrid().Begin(); iter != grid_old.Iterators().CompleteGrid().End(); ++iter)
     grid_new(*iter) += grid_old.GetVal(*iter);
+}
+
+std::vector<BoundaryValue> CommSerial::CommBoundaryValues()
+{
+  return MG::GetBoundaryValueSetter()->BoundaryValues();
 }
```
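
With a single process there is nothing to distribute or reduce, so the serial variant simply hands back the setter's boundary values unchanged.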
src/comm/comm_serial.hpp

```diff
--- src/comm/comm_serial.hpp (rd6a338)
+++ src/comm/comm_serial.hpp (r177495)
@@ -58,4 +58,5 @@
   void CommSubgrid(Grid& grid_old, Grid& grid_new, const int& direction);
   void CommAddSubgrid(Grid& grid_old, Grid& grid_new, const int& direction);
+  std::vector<BoundaryValue> CommBoundaryValues();
 
   void CommToGhostsAsyncStart(Grid& grid);
```
src/comm/mpi/settings.cpp

```diff
--- src/comm/mpi/settings.cpp (rd6a338)
+++ src/comm/mpi/settings.cpp (r177495)
@@ -44,6 +44,8 @@
 #include <string>
 
+#include "base/index.hpp"
 #include "comm/comm.hpp"
 #include "comm/mpi/settings.hpp"
+#include "discretization/boundary_value_setter.hpp"
 #include "grid/multigrid.hpp"
 #include "grid/tempgrid.hpp"
@@ -218,4 +220,6 @@
     AddDatatypeGlobal(FinerGrid(sol(i-1)), sol(i), 1);
   }
+
+  InitializeBoundaryValues();
 }
 
@@ -428,4 +432,36 @@
 }
 
+void VMG::MPI::Settings::InitializeBoundaryValues()
+{
+  assert(bv_ranks.size() == 0);
+
+  if (MG::GetFactory().TestObject("BOUNDARY_VALUE_SETTER")) {
+
+    Index coord;
+
+    const int level_index = MG::GetRhs()->MaxLevel() - MG::GetRhs()->GlobalMaxLevel();
+    const std::vector<BoundaryValue>& bvs = MG::GetBoundaryValueSetter()->BoundaryValues();
+    const std::map<Index, std::vector<GlobalIndices> >& global = MG::GetComm()->DecomposedGlobal();
+
+    assert(global.find(0)->second[level_index].BoundaryType() == GlobalMax);
+
+    MPI_Comm comm = CommunicatorLocal((*MG::GetRhs())(MG::GetRhs()->GlobalMaxLevel()));
+
+    bv_ranks.reserve(bvs.size());
+
+    for (std::vector<BoundaryValue>::const_iterator iter_b = bvs.begin(); iter_b != bvs.end(); ++iter_b) {
+      for (std::map<Index, std::vector<GlobalIndices> >::const_iterator iter_g = global.begin(); iter_g != global.end(); ++iter_g) {
+        if (iter_b->GetIndex().IsComponentwiseGreaterOrEqual(iter_g->second[level_index].LocalBegin()) &&
+            iter_b->GetIndex().IsComponentwiseLess(iter_g->second[level_index].LocalEnd())) {
+          bv_ranks.push_back(0);
+          coord = iter_g->first;
+          MPI_Cart_rank(comm, coord.vec(), &bv_ranks.back());
+          break;
+        }
+      }
+    }
+  }
+}
+
 std::string VMG::MPI::Settings::ToString() const
 {
```
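
`InitializeBoundaryValues()` precomputes, once during settings setup, which rank owns each boundary value: a point belongs to the process whose subdomain on the finest global level contains its grid index componentwise, and `MPI_Cart_rank` translates that process's Cartesian coordinate into a rank. A reduced sketch of the same lookup over plain `int[3]` boxes (the `Box`/`Point` types and the `AssignOwnerRanks` helper are invented for illustration; VMG uses `Index` and `GlobalIndices` instead):

```cpp
#include <mpi.h>
#include <cstddef>
#include <vector>

// One box per process of the Cartesian communicator.
struct Box {
  int coord[3];          // Cartesian coordinate of the owning process
  int begin[3], end[3];  // half-open index range [begin, end) of its subdomain
};

struct Point { int i[3]; };  // global grid index of one boundary value

// True if begin <= p < end holds componentwise, i.e. the point lies in
// the box; this mirrors IsComponentwiseGreaterOrEqual/IsComponentwiseLess.
static bool Contains(const Box& b, const Point& p)
{
  for (int d = 0; d < 3; ++d)
    if (p.i[d] < b.begin[d] || p.i[d] >= b.end[d])
      return false;
  return true;
}

// For each point, find the containing box and translate its Cartesian
// coordinate into an MPI rank, as InitializeBoundaryValues does.
std::vector<int> AssignOwnerRanks(MPI_Comm cart_comm,
                                  const std::vector<Box>& boxes,
                                  const std::vector<Point>& points)
{
  std::vector<int> ranks;
  ranks.reserve(points.size());
  for (std::size_t p = 0; p < points.size(); ++p)
    for (std::size_t b = 0; b < boxes.size(); ++b)
      if (Contains(boxes[b], points[p])) {
        int coords[3] = { boxes[b].coord[0], boxes[b].coord[1], boxes[b].coord[2] };
        int rank;
        MPI_Cart_rank(cart_comm, coords, &rank);  // coordinate -> rank
        ranks.push_back(rank);
        break;
      }
  return ranks;
}
```

The scan is linear in boundary values times subdomains, which is acceptable here because it runs once at initialization rather than per solve.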
src/comm/mpi/settings.hpp

```diff
--- src/comm/mpi/settings.hpp (rd6a338)
+++ src/comm/mpi/settings.hpp (r177495)
@@ -70,4 +70,6 @@
   VMG::MPI::DatatypesLocal& DatatypesLocal(const Grid& grid);
 
+  const std::vector<int>& BoundaryRanks() const {return bv_ranks;}
+
   std::string ToString() const;
 
@@ -79,4 +81,6 @@
   void CreateLocalCommunicator(MPI_Comm& comm_global, const Grid& grid);
 
+  void InitializeBoundaryValues();
+
   std::map<int, MPI_Comm> communicators_global;
   std::map<KeyUnsorted, MPI_Comm> communicators_local;
@@ -86,4 +90,7 @@
   std::map<KeyUnsorted, VMG::MPI::DatatypesGlobal> datatypes_global;
   std::map<KeyUnsorted, VMG::MPI::DatatypesLocal> datatypes_local;
+
+  std::vector<int> bv_ranks;
+
 };
 
```