/*
* vmg - a versatile multigrid solver
* Copyright (C) 2012 Institute for Numerical Simulation, University of Bonn
*
* vmg is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* vmg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/**
* @file domain_decomposition_mpi.cpp
* @author Julian Iseringhausen
* @date Mon Jun 27 12:53:50 2011
*
 * @brief Computes a domain decomposition which distributes
 *        the finest grid evenly among all processes.
*
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <algorithm>
#include <map>
#include <vector>

#include "base/interface.hpp"
#include "comm/comm.hpp"
#include "comm/domain_decomposition_mpi.hpp"
#include "grid/grid.hpp"
#include "grid/multigrid.hpp"

using namespace VMG;
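
/*
 * Compute() determines, for every process position and every grid level, the
 * extents of the locally owned grid part. The finest level is split as evenly
 * as possible; a coarser level reuses the next finer decomposition (halving
 * the indices) whenever the number of active processes per dimension stays
 * the same, and falls back to an even split otherwise. Processes that are
 * inactive on a level receive empty extents.
 */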
void DomainDecompositionMPI::Compute(Comm& comm, const Interface& interface, std::map<Index, std::vector<GlobalIndices> >& global)
{
  GlobalIndices global_l;
  Index pos, remainder, procs;
  Index last_procs = comm.GlobalProcs();

  global.clear();

  for (unsigned int i=0; i<interface.Global().size(); ++i) {

    // Global extents of grid level i, as provided by the interface.
    global_l = interface.Global()[i];

    for (pos[0]=0; pos[0]<comm.GlobalProcs()[0]; ++pos[0])
      for (pos[1]=0; pos[1]<comm.GlobalProcs()[1]; ++pos[1])
        for (pos[2]=0; pos[2]<comm.GlobalProcs()[2]; ++pos[2]) {

          if (IsActive(global_l.GlobalSize(), pos, procs, comm.GlobalProcs())) {

            if (i == 0) {

              // Finest level: split each dimension as evenly as possible,
              // handing the remaining points to the processes with the
              // lowest coordinates.
              for (int j=0; j<3; ++j) {
                remainder[j] = global_l.GlobalSize()[j] % procs[j];
                global_l.LocalSize()[j] = global_l.GlobalSize()[j] / procs[j];
                if (pos[j] < remainder[j])
                  ++(global_l.LocalSize()[j]);
                global_l.LocalBegin()[j] = global_l.GlobalBegin()[j] + pos[j] * global_l.LocalSize()[j];
                if (pos[j] >= remainder[j])
                  global_l.LocalBegin()[j] += remainder[j];
              }

              global_l.LocalEnd() = global_l.LocalBegin() + global_l.LocalSize();
            } else {

              // Coarser levels: if the number of active processes along this
              // dimension is unchanged, derive the local range by halving the
              // next finer one; otherwise recompute an even split.
              for (int j=0; j<3; ++j) {
                if (procs[j] == last_procs[j]) {
                  if (global[pos].back().LocalBegin()[j] == global[pos].back().GlobalBegin()[j])
                    global_l.LocalBegin()[j] = global_l.GlobalBegin()[j];
                  else
                    global_l.LocalBegin()[j] = global[pos].back().LocalBegin()[j] / 2;
                  if (global[pos].back().LocalEnd()[j] == global[pos].back().GlobalEnd()[j])
                    global_l.LocalEnd()[j] = global_l.GlobalEnd()[j];
                  else
                    global_l.LocalEnd()[j] = global[pos].back().LocalEnd()[j] / 2;
                  global_l.LocalSize()[j] = global_l.LocalEnd()[j] - global_l.LocalBegin()[j];
                } else {
                  remainder[j] = global_l.GlobalSize()[j] % procs[j];
                  global_l.LocalSize()[j] = global_l.GlobalSize()[j] / procs[j];
                  if (pos[j] < remainder[j])
                    ++(global_l.LocalSize()[j]);
                  global_l.LocalBegin()[j] = global_l.GlobalBegin()[j] + pos[j] * global_l.LocalSize()[j];
                  if (pos[j] >= remainder[j])
                    global_l.LocalBegin()[j] += remainder[j];
                  global_l.LocalEnd()[j] = global_l.LocalBegin()[j] + global_l.LocalSize()[j];
                }
              }
            }
          } else {

            // Processes that are inactive on this level own no grid points.
            global_l.LocalBegin() = 0;
            global_l.LocalEnd() = 0;
            global_l.LocalSize() = 0;
          }

          global[pos].push_back(global_l);
        }

    last_procs = procs;
  }
}
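
/*
 * For illustration: splitting 10 points in one dimension over procs[j] = 3
 * active processes gives remainder[j] = 1, so the local sizes along that
 * dimension are 4, 3, 3 and the local begins (relative to GlobalBegin()[j])
 * are 0, 4 and 7.
 */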
bool DomainDecompositionMPI::IsActive(const Index& size_global, const Index& pos, Index& procs, const Index& max_procs)
{
  bool is_active = true;
  const int points_min = 5;

  // Cap the number of active processes per dimension so that dimensions with
  // few grid points are handled by correspondingly few processes.
  procs = size_global / points_min + 1;

  for (int i=0; i<3; ++i) {
    procs[i] = std::min(procs[i], max_procs[i]);
    is_active &= pos[i] < procs[i];
  }

  return is_active;
}
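
/*
 * For illustration: with a global size of 17 points in one dimension, at most
 * 17 / 5 + 1 = 4 processes stay active along that dimension (further capped
 * by max_procs); a process whose coordinate satisfies pos[i] >= procs[i] is
 * inactive on that level.
 */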
void DomainDecompositionMPI::FineToCoarse(Comm& comm, int& begin, int& end, int levels)
{
  int last_point = end - 1;

  // Map the half-open fine-grid range [begin, end) onto the coarser grid by
  // halving its first and last point once per level, mirroring the index
  // halving used in Compute().
  for (int i=0; i<levels; ++i) {
    begin /= 2;
    last_point /= 2;
  }

  end = last_point + 1;
}
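
/*
 * For illustration, assuming the halving above: the fine-grid range [4, 11)
 * coarsened over levels = 2 becomes [2, 6) after the first step (last point
 * 10 -> 5) and [1, 3) after the second (last point 5 -> 2).
 */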