/*
* Project: MoleCuilder
* Description: creates and alters molecular systems
* Copyright (C) 2012 University of Bonn. All rights reserved.
* Please see the COPYING file or "Copyright notice" in builder.cpp for details.
*
*
* This file is part of MoleCuilder.
*
* MoleCuilder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* MoleCuilder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with MoleCuilder. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* FunctionApproximation.cpp
*
* Created on: 02.10.2012
* Author: heber
*/
// include config.h
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "CodePatterns/MemDebug.hpp"
#include "FunctionApproximation.hpp"
#include <algorithm>
#include <boost/bind.hpp>
#include <boost/function.hpp>
#include <cstdlib>
#include <iostream>
#include <levmar.h>
#include <sstream>
#include <vector>
#include "CodePatterns/Assert.hpp"
#include "CodePatterns/Log.hpp"
#include "FunctionApproximation/FunctionModel.hpp"
void FunctionApproximation::setTrainingData(const inputs_t &input, const outputs_t &output)
{
ASSERT( input.size() == output.size(),
"FunctionApproximation::setTrainingData() - the number of input and output tuples differ: "+toString(input.size())+"!="
+toString(output.size())+".");
if (input.size() != 0) {
ASSERT( input[0].size() == input_dimension,
"FunctionApproximation::setTrainingData() - the dimension of the input tuples and input dimension differ: "+toString(input[0].size())+"!="
+toString(input_dimension)+".");
input_data = input;
ASSERT( output[0].size() == output_dimension,
"FunctionApproximation::setTrainingData() - the dimension of the output tuples and output dimension differ: "+toString(output[0].size())+"!="
+toString(output_dimension)+".");
output_data = output;
} else {
ELOG(2, "Given vectors of training data are empty, clearing internal vectors accordingly.");
input_data.clear();
output_data.clear();
}
}
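
/** Sets the model function whose parameters are to be optimized.
 *
 * \param _model model function to fit to the training data
 */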
void FunctionApproximation::setModelFunction(FunctionModel &_model)
{
model = _model;
}
/** Callback to circumvent boost::bind, boost::function and function pointer problem.
*
* See here (second answer!) for the nature of the problem:
* http://stackoverflow.com/questions/282372/demote-boostfunction-to-a-plain-function-pointer
*
* We cannot hand a boost::function constructed via boost::bind to levmar as
* a plain function pointer: boost::function::target() will just return NULL
* because the stored type does not match the requested signature. We have to
* use a C-style callback function instead; our luck is that the levmar
* signature provides a void* pointer for additional data, which we cast back
* to our FunctionApproximation instance, as we need access to the data
* contained therein, e.g. the FunctionModel reference FunctionApproximation::model.
*
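*
* A minimal sketch of the failing demotion (hypothetical code, not compiled in):
* \code
*   typedef void (*levmar_func_t)(double*, double*, int, int, void*);
*   boost::function<void (double*, double*, int, int, void*)> f =
*       boost::bind(&FunctionApproximation::evaluate, this, _1, _2, _3, _4, _5);
*   levmar_func_t *fp = f.target<levmar_func_t>(); // fp is NULL, stored type differs
* \endcode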
*/
void FunctionApproximation::LevMarCallback(double *p, double *x, int m, int n, void *data)
{
FunctionApproximation *approximator = static_cast<FunctionApproximation *>(data);
ASSERT( approximator != NULL,
"LevMarCallback() - received data does not represent a FunctionApproximation object.");
boost::function<void (double*, double*, int, int, void*)> function =
    boost::bind(&FunctionApproximation::evaluate, approximator, _1, _2, _3, _4, _5);
function(p,x,m,n,data);
}
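
/** Callback for the Jacobian evaluation, constructed in the same fashion as
 * LevMarCallback(): the FunctionApproximation instance is recovered from the
 * additional data pointer \a data.
 */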
void FunctionApproximation::LevMarDerivativeCallback(double *p, double *x, int m, int n, void *data)
{
FunctionApproximation *approximator = static_cast<FunctionApproximation *>(data);
ASSERT( approximator != NULL,
"LevMarDerivativeCallback() - received data does not represent a FunctionApproximation object.");
boost::function<void (double*, double*, int, int, void*)> function =
    boost::bind(&FunctionApproximation::evaluateDerivative, approximator, _1, _2, _3, _4, _5);
function(p,x,m,n,data);
}
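
/** Copies the model's current parameters into a plain C array as levmar
 * requires it.
 *
 * \param p on return, newly allocated array holding the parameters (caller frees)
 * \param m on return, the number of parameters
 */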
void FunctionApproximation::prepareParameters(double *&p, int &m) const
{
m = model.getParameterDimension();
const FunctionModel::parameters_t params = model.getParameters();
{
p = new double[m];
size_t index = 0;
for(FunctionModel::parameters_t::const_iterator paramiter = params.begin();
paramiter != params.end();
++paramiter, ++index) {
p[index] = *paramiter;
}
}
}
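
/** Copies the first component of each training output tuple into a plain C
 * array as levmar requires it.
 *
 * \param x on return, newly allocated array holding the output values (caller frees)
 * \param n on return, the number of output values
 */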
void FunctionApproximation::prepareOutput(double *&x, int &n) const
{
n = output_data.size();
{
x = new double[n];
size_t index = 0;
for(outputs_t::const_iterator outiter = output_data.begin();
outiter != output_data.end();
++outiter, ++index) {
x[index] = (*outiter)[0];
}
}
}
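
/** Performs the non-linear regression via levmar's Levenberg-Marquardt drivers.
 *
 * A typical call sequence might look as follows (a sketch only, constructor
 * arguments elided):
 * \code
 *   FunctionApproximation approximator(...);
 *   approximator.setModelFunction(model);
 *   approximator.setTrainingData(inputs, outputs);
 *   approximator(FunctionApproximation::ParameterDerivative);
 * \endcode
 *
 * \param mode whether the Jacobian is approximated via finite differences or
 *        evaluated analytically through the model's parameter derivatives
 */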
void FunctionApproximation::operator()(const enum JacobianMode mode)
{
// let levmar optimize
register int i, j;
int ret;
double *p;
double *x;
int m, n;
double opts[LM_OPTS_SZ], info[LM_INFO_SZ];
opts[0]=LM_INIT_MU; opts[1]=1E-15; opts[2]=1E-15; opts[3]=1E-20;
opts[4]= LM_DIFF_DELTA; // relevant only if the Jacobian is approximated using finite differences; specifies forward differencing
//opts[4]=-LM_DIFF_DELTA; // specifies central differencing to approximate Jacobian; more accurate but more expensive to compute!
prepareParameters(p,m);
prepareOutput(x,n);
{
double *work, *covar;
work=(double *)malloc((LM_DIF_WORKSZ(m, n)+m*m)*sizeof(double));
if(!work){
ELOG(0, "FunctionApproximation::operator() - memory allocation request failed.");
return;
}
covar=work+LM_DIF_WORKSZ(m, n);
// give this pointer as additional data to construct function pointer in
// LevMarCallback and call
if (model.isBoxConstraint()) {
FunctionModel::parameters_t lowerbound = model.getLowerBoxConstraints();
FunctionModel::parameters_t upperbound = model.getUpperBoxConstraints();
double *lb = new double[m];
double *ub = new double[m];
      for (size_t i=0;i<(size_t)m;++i) {
        lb[i] = lowerbound[i];
        ub[i] = upperbound[i];
      }
      if (mode == FiniteDifferences) {
        // Jacobian is approximated via finite differences
        ret=dlevmar_bc_dif(
            &FunctionApproximation::LevMarCallback,
            p, x, m, n, lb, ub, NULL, 1000, opts, info, work, covar, this);
      } else if (mode == ParameterDerivative) {
        // use the analytic parameter derivatives of the model
        ret=dlevmar_bc_der(
            &FunctionApproximation::LevMarCallback,
            &FunctionApproximation::LevMarDerivativeCallback,
            p, x, m, n, lb, ub, NULL, 1000, opts, info, work, covar, this);
      } else {
        ret = 0;
        ASSERT(0, "FunctionApproximation::operator() - unknown Jacobian mode "+toString(mode)+".");
      }
      delete[] lb;
      delete[] ub;
    } else {
      if (mode == FiniteDifferences) {
        ret=dlevmar_dif(
            &FunctionApproximation::LevMarCallback,
            p, x, m, n, 1000, opts, info, work, covar, this);
      } else if (mode == ParameterDerivative) {
        ret=dlevmar_der(
            &FunctionApproximation::LevMarCallback,
            &FunctionApproximation::LevMarDerivativeCallback,
            p, x, m, n, 1000, opts, info, work, covar, this);
      } else {
        ret = 0;
        ASSERT(0, "FunctionApproximation::operator() - unknown Jacobian mode "+toString(mode)+".");
      }
    }
    LOG(0, "INFO: Levenberg-Marquardt returned " << ret << " in " << info[5] << " iterations.");
    {
      // print the covariance matrix of the fit
      std::stringstream covar_msg;
      for(i=0; i<m; ++i) {
        for(j=0; j<m; ++j)
          covar_msg << covar[i*m+j] << "\t";
        covar_msg << "\n";
      }
      LOG(1, "INFO: Covariance of the fit:\n" << covar_msg.str());
    }
    free(work);
  }

  std::vector<std::string> infonames(LM_INFO_SZ);
infonames[0] = std::string("||e||_2 at initial p");
infonames[1] = std::string("||e||_2");
infonames[2] = std::string("||J^T e||_inf");
infonames[3] = std::string("||Dp||_2");
infonames[4] = std::string("mu/max[J^T J]_ii");
infonames[5] = std::string("# iterations");
infonames[6] = std::string("reason for termination");
infonames[7] = std::string("# function evaluations");
infonames[8] = std::string("# Jacobian evaluations");
infonames[9] = std::string("# linear systems solved");
  for(i=0; i<LM_INFO_SZ; ++i)
    LOG(1, "INFO: " << infonames[i] << ": " << info[i]);

  // levmar has updated p in place; place the optimized parameters in the model
  prepareModel(p,m);

  delete[] p;
  delete[] x;
}

/** Checks the analytic parameter derivatives of the model against levmar's
 * finite-difference approximation via dlevmar_chkjac().
 *
 * \return true - all parameter derivatives appear correct, false - else
 */
bool FunctionApproximation::checkParameterDerivatives()
{
  double *p;
  int m;
  const FunctionModel::parameters_t backupparams = model.getParameters();
  prepareParameters(p,m);
  const int n = output_data.size();
  double *err = new double[n];
  dlevmar_chkjac(
      &FunctionApproximation::LevMarCallback,
      &FunctionApproximation::LevMarDerivativeCallback,
      p, m, n, this, err);
  int i;
  bool status = true;
  for(i=0; i<n; ++i)
    status &= err[i] > 0.5;
if (!status) {
int faulty;
ELOG(0, "At least one of the parameter derivatives are incorrect.");
for (faulty=1; faulty<=m; ++faulty) {
LOG(1, "INFO: Trying with only the first " << faulty << " parameters...");
model.setParameters(backupparams);
dlevmar_chkjac(
&FunctionApproximation::LevMarCallback,
&FunctionApproximation::LevMarDerivativeCallback,
p, faulty, n, this, err);
      bool status = true;
      for(i=0; i<n; ++i)
        status &= err[i] > 0.5;
      if (!status)
        ELOG(0, "The faulty derivative is with respect to parameter " << faulty << ".");
      for(i=0; i<n; ++i)
        LOG(1, "INFO: gradient's err[" << i << "] is " << err[i] << ".");
    }
  }

  delete[] err;
  delete[] p;
  model.setParameters(backupparams);

  return status;
}

/** Sets the parameters of the model to the values handed in by levmar.
 *
 * \param p array of parameter values
 * \param m number of parameters
 */
void FunctionApproximation::prepareModel(double *p, int m)
{
  ASSERT( (size_t)m == model.getParameterDimension(),
      "FunctionApproximation::prepareModel() - LevMar hands over "+toString(m)
      +" parameters but the model expects "+toString(model.getParameterDimension())+".");
  FunctionModel::parameters_t params(m, 0.);
  std::copy(p, p+m, params.begin());
  model.setParameters(params);
}

/** Callback for levmar to evaluate the model at the current parameters \a p
 * on every input tuple, filling the measurement vector \a x.
 */
void FunctionApproximation::evaluate(double *p, double *x, int m, int n, void *data)
{
  // first set parameters
  prepareModel(p,m);

  // then evaluate
  ASSERT( (size_t)n == output_data.size(),
      "FunctionApproximation::evaluate() - LevMar expects "+toString(n)
      +" outputs but we provide "+toString(output_data.size())+".");
  if (!output_data.empty()) {
    inputs_t::const_iterator initer = input_data.begin();
    outputs_t::const_iterator outiter = output_data.begin();
    size_t index = 0;
    for (; initer != input_data.end(); ++initer, ++outiter) {
      // result may be a vector, calculate L2 norm
      const FunctionModel::results_t functionvalue = model(*initer);
      x[index++] = functionvalue[0];
//      std::vector<double> differences(functionvalue.size(), 0.);
// std::transform(
// functionvalue.begin(), functionvalue.end(), outiter->begin(),
// differences.begin(),
// &SquaredDifference);
// x[index] = std::accumulate(differences.begin(), differences.end(), 0.);
}
}
}
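
/** Callback for levmar to obtain the Jacobian of the model at the current
 * parameters \a p.
 *
 * Fills \a jac row by row: for each input tuple, the derivative of the model
 * with respect to each of the \a m parameters.
 */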
void FunctionApproximation::evaluateDerivative(double *p, double *jac, int m, int n, void *data)
{
// first set parameters
prepareModel(p,m);
// then evaluate
ASSERT( (size_t)n == output_data.size(),
"FunctionApproximation::evaluateDerivative() - LevMar expects "+toString(n)
+" outputs but we provide "+toString(output_data.size())+".");
if (!output_data.empty()) {
inputs_t::const_iterator initer = input_data.begin();
outputs_t::const_iterator outiter = output_data.begin();
size_t index = 0;
for (; initer != input_data.end(); ++initer, ++outiter) {
// result may be a vector, calculate L2 norm
for (int paramindex = 0; paramindex < m; ++paramindex) {
const FunctionModel::results_t functionvalue =
model.parameter_derivative(*initer, paramindex);
jac[index++] = functionvalue[0];
}
// std::vector<double> differences(functionvalue.size(), 0.);
// std::transform(
// functionvalue.begin(), functionvalue.end(), outiter->begin(),
// differences.begin(),
// &SquaredDifference);
// x[index] = std::accumulate(differences.begin(), differences.end(), 0.);
}
}
}