Ignore:
Timestamp:
Apr 5, 2013, 12:39:30 PM (13 years ago)
Author:
Julian Iseringhausen <julian.iseringhausen@…>
Children:
4a709e
Parents:
f57182
Message:

Merge stashed open boundary stuff.

File:
1 file edited

Legend:

Unmodified
Added
Removed
  • src/comm/mpi/settings.cpp

    r:f57182 → r:8180d8
    9696
    9797    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
    98         temp_grid->Global().LocalBegin().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd()) &&
    99         temp_grid->Global().LocalEnd().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
    100         temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd())) {
     98        temp_grid->Global().LocalBegin().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd()) &&
     99        temp_grid->Global().LocalEnd().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
     100        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd())) {
    101101      delete temp_grid;
    102102      coarser_grids.insert(std::make_pair(&sol(i), &sol(i-1)));
     
    113113
    114114    if (temp_grid->Global().LocalBegin().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
    115         temp_grid->Global().LocalBegin().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd()) &&
    116         temp_grid->Global().LocalEnd().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
    117         temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd())) {
     115        temp_grid->Global().LocalBegin().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd()) &&
     116        temp_grid->Global().LocalEnd().IsComponentwiseGreaterOrEqual(sol(i-1).Global().LocalBegin()) &&
     117        temp_grid->Global().LocalEnd().IsComponentwiseLessOrEqual(sol(i-1).Global().LocalEnd())) {
    118118      delete temp_grid;
    119119      coarser_grids.insert(std::make_pair(&rhs(i), &rhs(i-1)));
     
    133133
    134134    if (temp_grid->Global().LocalBegin() == sol(i+1).Global().LocalBegin() &&
    135         temp_grid->Global().LocalEnd() == sol(i+1).Global().LocalEnd()) {
     135        temp_grid->Global().LocalEnd() == sol(i+1).Global().LocalEnd()) {
    136136      delete temp_grid;
    137137      finer_grids.insert(std::make_pair(&sol(i), &sol(i+1)));
     
    148148
    149149    if (temp_grid->Global().LocalBegin() == rhs(i+1).Global().LocalBegin() &&
    150         temp_grid->Global().LocalEnd() == rhs(i+1).Global().LocalEnd()) {
     150        temp_grid->Global().LocalEnd() == rhs(i+1).Global().LocalEnd()) {
    151151      delete temp_grid;
    152152      finer_grids.insert(std::make_pair(&rhs(i), &rhs(i+1)));
     
    220220
    221221  const bool in_communicator = (grid_1->Global().LocalSize().Product() > 0) ||
    222                                (grid_2 && grid_2->Global().LocalSize().Product() > 0) ||
    223                                (grid_3 && grid_3->Global().LocalSize().Product() > 0);
     222      (grid_2 && grid_2->Global().LocalSize().Product() > 0) ||
     223      (grid_3 && grid_3->Global().LocalSize().Product() > 0);
    224224
    225225  MPI_Comm_rank(comm_global, &rank);
     
    263263    for (iter=communicators_local_unique.begin(); iter!=communicators_local_unique.end(); ++iter) {
    264264      if (*iter != MPI_COMM_NULL) {
    265         MPI_Comm_compare(comm_new, *iter, &comm_equal);
    266         assert(comm_equal != MPI_SIMILAR);
    267         if (comm_equal == MPI_IDENT || comm_equal == MPI_CONGRUENT) {
    268           MPI_Comm_free(&comm_new);
    269           comm_new = *iter;
    270           break;
    271         }
     265        MPI_Comm_compare(comm_new, *iter, &comm_equal);
     266        assert(comm_equal != MPI_SIMILAR);
     267        if (comm_equal == MPI_IDENT || comm_equal == MPI_CONGRUENT) {
     268          MPI_Comm_free(&comm_new);
     269          comm_new = *iter;
     270          break;
     271        }
    272272      }
    273273    }
     
    283283  bool dt_is_new = true;
    284284
    285     // Insert into map
     285  // Insert into map
    286286  std::pair< std::map<VMG::MPI::KeyUnsorted, VMG::MPI::DatatypesGlobal>::iterator, bool > insert_result =
    287     datatypes_global.insert(std::make_pair(VMG::MPI::KeyUnsorted(grid_old, grid_new, direction), VMG::MPI::DatatypesGlobal()));
     287      datatypes_global.insert(std::make_pair(VMG::MPI::KeyUnsorted(grid_old, grid_new, direction), VMG::MPI::DatatypesGlobal()));
    288288  VMG::MPI::DatatypesGlobal& dt_global = insert_result.first->second;
    289289  dt_is_new = insert_result.second;
     
    310310    if (&grid_old == &grid_new) {
    311311      for (int i=0; i<6; ++i)
    312         buffer[6*rank+i] = 0;
     312        buffer[6*rank+i] = 0;
    313313    }else {
    314314      for (int i=0; i<3; ++i) {
    315         buffer[6*rank+i] = grid_old.Global().LocalBegin()[i];
    316         buffer[6*rank+i+3] = grid_old.Global().LocalEnd()[i];
     315        buffer[6*rank+i] = grid_old.Global().LocalBegin()[i];
     316        buffer[6*rank+i+3] = grid_old.Global().LocalEnd()[i];
    317317      }
    318318    }
     
    324324      // Decide who offers a useful grid part
    325325      for (int i=0; i<size; ++i) {
    326         for (int j=0; j<3; ++j) {
    327           begin[j] = buffer[6*i+j];
    328           end[j] = buffer[6*i+j+3];
    329         }
    330 
    331         begin = begin.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());
    332         end = end.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());
    333 
    334         if ((end-begin).Product() > 0) {
    335           // This process has a useful part
    336           dt_global.Receive().push_back(VMG::MPI::Datatype(grid_new.Local().SizeTotal(),
    337                                                            end - begin,
    338                                                            begin - grid_new.Global().LocalBegin() + offset_new,
    339                                                            i, 0, 0, true));
    340         }
     326        for (int j=0; j<3; ++j) {
     327          begin[j] = buffer[6*i+j];
     328          end[j] = buffer[6*i+j+3];
     329        }
     330
     331        begin = begin.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());
     332        end = end.Clamp(grid_new.Global().LocalBegin(), grid_new.Global().LocalEnd());
     333
     334        if ((end-begin).Product() > 0) {
     335          // This process has a useful part
     336          dt_global.Receive().push_back(VMG::MPI::Datatype(grid_new.Local().SizeTotal(),
     337              end - begin,
     338              begin - grid_new.Global().LocalBegin() + grid_new.Global().GlobalBegin() + offset_new,
     339              i, 0, 0, true));
     340        }
    341341      }
    342342    }
     
    355355      for (int i=0; i<size; ++i) {
    356356
    357         if ((i == rank) && (&grid_old == &grid_new))
    358           continue;
    359 
    360         for (int j=0; j<3; ++j) {
    361           begin[j] = buffer[6*i+j];
    362           end[j] = buffer[6*i+j+3];
    363         }
    364 
    365         begin = begin.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());
    366         end = end.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());
    367 
    368         if ((end-begin).Product() > 0) {
    369           // This process needs one of my parts
    370           dt_global.Send().push_back(VMG::MPI::Datatype(grid_old.Local().SizeTotal(),
    371                                                         end - begin,
    372                                                         begin - grid_old.Global().LocalBegin() + offset_old,
    373                                                         i, 0, 0, true));
    374         }
     357        if ((i == rank) && (&grid_old == &grid_new))
     358          continue;
     359
     360        for (int j=0; j<3; ++j) {
     361          begin[j] = buffer[6*i+j];
     362          end[j] = buffer[6*i+j+3];
     363        }
     364
     365        begin = begin.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());
     366        end = end.Clamp(grid_old.Global().LocalBegin(), grid_old.Global().LocalEnd());
     367
     368        if ((end-begin).Product() > 0) {
     369          // This process needs one of my parts
     370          dt_global.Send().push_back(VMG::MPI::Datatype(grid_old.Local().SizeTotal(),
     371              end - begin,
     372              begin - grid_old.Global().LocalBegin() + grid_old.Global().GlobalBegin() + offset_old,
     373              i, 0, 0, true));
     374        }
    375375      }
    376376    }
     
    379379
    380380MPI_Datatype& VMG::MPI::Settings::Datatype(const Index& begin, const Index& end,
    381                                            const Index& size_local, const Index& size_global,
    382                                            const int& level)
     381    const Index& size_local, const Index& size_global,
     382    const int& level)
    383383{
    384384  KeyUnsorted k(begin, end, size_local, size_global, level, 0);
Note: See TracChangeset for help on using the changeset viewer.