/*
 * Project: MoleCuilder
 * Description: creates and alters molecular systems
 * Copyright (C)  2012 University of Bonn. All rights reserved.
 * Please see the COPYING file or "Copyright notice" in builder.cpp for details.
 *
 *
 *   This file is part of MoleCuilder.
 *
 *    MoleCuilder is free software: you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation, either version 2 of the License, or
 *    (at your option) any later version.
 *
 *    MoleCuilder is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *    GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with MoleCuilder.  If not, see <http://www.gnu.org/licenses/>.
 */
/*
 * FunctionApproximation.cpp
 *
 *  Created on: 02.10.2012
 *      Author: heber
 */
// include config.h
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include "CodePatterns/MemDebug.hpp"
#include "FunctionApproximation.hpp"
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <numeric>
#include <sstream>
#include <string>
#include <vector>

#include <boost/bind.hpp>
#include <boost/function.hpp>

#include <levmar.h>
#include "CodePatterns/Assert.hpp"
#include "CodePatterns/Log.hpp"
#include "FunctionApproximation/FunctionModel.hpp"
void FunctionApproximation::setTrainingData(const inputs_t &input, const outputs_t &output)
{
  ASSERT( input.size() == output.size(),
      "FunctionApproximation::setTrainingData() - the number of input and output tuples differ: "+toString(input.size())+"!="
      +toString(output.size())+".");
  if (input.size() != 0) {
    ASSERT( input[0].size() == input_dimension,
        "FunctionApproximation::setTrainingData() - the dimension of the input tuples and input dimension differ: "+toString(input[0].size())+"!="
        +toString(input_dimension)+".");
    input_data = input;
    ASSERT( output[0].size() == output_dimension,
        "FunctionApproximation::setTrainingData() - the dimension of the output tuples and output dimension differ: "+toString(output[0].size())+"!="
        +toString(output_dimension)+".");
    output_data = output;
  } else {
    ELOG(2, "Given vectors of training data are empty, clearing internal vectors accordingly.");
    input_data.clear();
    output_data.clear();
  }
}
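// A minimal usage sketch (hypothetical caller code: the construction
// arguments and the concrete FunctionModel are assumptions, not taken from
// this file):
//
//   FunctionApproximation approximator(/* dimensions, model, ... */);
//   approximator.setModelFunction(model);          // model: some FunctionModel
//   approximator.setTrainingData(inputs, outputs);
//   approximator();                                // run the Levenberg-Marquardt fit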
void FunctionApproximation::setModelFunction(FunctionModel &_model)
{
  model = _model;
}
/* Meyer's (reformulated) problem, minimum at (2.48, 6.18, 3.45) */
void meyer(double *p, double *x, int m, int n, void *data)
{
register int i;
double ui;

  for(i=0; i<n; ++i){
    ui = 0.45 + 0.05*i;
    x[i] = p[0]*exp(10.0*p[1]/(ui + p[2]) - 13.0);
  }
}

void FunctionApproximation::LevMarCallback(double *p, double *x, int m, int n, void *data)
{
  FunctionApproximation *approximator = static_cast<FunctionApproximation *>(data);
  ASSERT( approximator != NULL,
      "LevMarCallback() - received data does not represent a FunctionApproximation object.");
  boost::function<void (double*, double*, int, int, void*)> function =
      boost::bind(&FunctionApproximation::evaluate, approximator, _1, _2, _3, _4, _5);
  function(p,x,m,n,data);
}
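// Note on the callback mechanism: dlevmar_dif() accepts only a plain C
// function pointer, so the static LevMarCallback() acts as a trampoline:
// the FunctionApproximation instance travels through the opaque void *data
// argument, is recovered via static_cast, and the call is forwarded to the
// member function evaluate().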
void FunctionApproximation::operator()()
{
  // let levmar optimize
  register int i, j;
  int ret;
  double *p;
  double *x; // vector of desired output values, filled from output_data below
  int m, n;
  double opts[LM_OPTS_SZ], info[LM_INFO_SZ];
  opts[0]=LM_INIT_MU; opts[1]=1E-15; opts[2]=1E-15; opts[3]=1E-20;
  opts[4]= LM_DIFF_DELTA; // relevant only if the Jacobian is approximated using finite differences; specifies forward differencing
  //opts[4]=-LM_DIFF_DELTA; // specifies central differencing to approximate Jacobian; more accurate but more expensive to compute!
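  // (per the levmar documentation, opts[0] scales the initial damping factor
  // mu, and opts[1]..opts[3] are the stopping thresholds for ||J^T e||_inf,
  // ||Dp||_2, and ||e||_2, respectively)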
  m = model.getParameterDimension();
  n = output_data.size();
  const FunctionModel::parameters_t params = model.getParameters();
  {
    p = new double[m];
    size_t index = 0;
    for(FunctionModel::parameters_t::const_iterator paramiter = params.begin();
        paramiter != params.end();
        ++paramiter, ++index) {
      p[index] = *paramiter;
    }
  }
  {
    x = new double[n];
    size_t index = 0;
    for(outputs_t::const_iterator outiter = output_data.begin();
        outiter != output_data.end();
        ++outiter, ++index) {
      x[index] = (*outiter)[0];
    }
  }
  {
    double *work, *covar;
    work=(double *)malloc((LM_DIF_WORKSZ(m, n)+m*m)*sizeof(double));
    if(!work){
      ELOG(0, "FunctionApproximation::operator() - memory allocation request failed.");
      return;
    }
    covar=work+LM_DIF_WORKSZ(m, n);
    // give this pointer as additional data to construct function pointer in
    // LevMarCallback and call
    ret=dlevmar_dif(&FunctionApproximation::LevMarCallback, p, x, m, n, 1000, opts, info, work, covar, this); // no Jacobian, caller allocates work memory, covariance estimated
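    // dlevmar_dif() returns the number of iterations taken on success and a
    // negative value on failure; termination details are stored in info[]
    // and reported below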
    {
      std::stringstream covar_msg;
      covar_msg << "Covariance of the fit:\n";
      for(i=0; i<m; ++i){
        for(j=0; j<m; ++j)
          covar_msg << covar[i*m+j] << " ";
        covar_msg << "\n";
      }
      LOG(1, "INFO: " << covar_msg.str());
    }
    free(work);
  }

  // report the summary information gathered by levmar
  {
   std::stringstream result_msg;
   result_msg << "Minimization info:\n";
   std::vector<std::string> infonames(LM_INFO_SZ);
   infonames[0] = std::string("||e||_2 at initial p");
   infonames[1] = std::string("||e||_2");
   infonames[2] = std::string("||J^T e||_inf");
   infonames[3] = std::string("||Dp||_2");
   infonames[4] = std::string("mu/max[J^T J]_ii");
   infonames[5] = std::string("# iterations");
   infonames[6] = std::string("reason for termination");
   infonames[7] = std::string(" # function evaluations");
   infonames[8] = std::string(" # Jacobian evaluations");
   infonames[9] = std::string(" # linear systems solved");
   for(i=0; i<LM_INFO_SZ; ++i)
     result_msg << infonames[i] << ": " << info[i] << "\n";
   LOG(1, "INFO: " << result_msg.str());
  }

  // hand the optimized parameters back to the model
  {
    FunctionModel::parameters_t optimized_params(m, 0.);
    std::copy(p, p+m, optimized_params.begin());
    model.setParameters(optimized_params);
  }

  delete[] p;
  delete[] x;
}

//!> helper returning the squared difference of its two arguments
static double SquaredDifference(const double &a, const double &b)
{
  return (a-b)*(a-b);
}

void FunctionApproximation::evaluate(double *p, double *x, int m, int n, void *data)
{
  // place the current trial parameters into the model
  FunctionModel::parameters_t params(m, 0.);
  std::copy(p, p+m, params.begin());
  model.setParameters(params);

  // evaluate the model on every input tuple and store the accumulated,
  // squared differences to the training outputs in x
  ASSERT( (size_t)n == output_data.size(),
      "FunctionApproximation::evaluate() - the number of measurements and output tuples differ: "
      +toString(n)+"!="+toString(output_data.size())+".");
  if (!output_data.empty()) {
    inputs_t::const_iterator initer = input_data.begin();
    outputs_t::const_iterator outiter = output_data.begin();
    size_t index = 0;
    for (; initer != input_data.end(); ++initer, ++outiter, ++index) {
      // the model may return a vector, hence compare component-wise
      const FunctionModel::results_t functionvalue = model(*initer);
      std::vector<double> differences(functionvalue.size(), 0.);
      std::transform(
          functionvalue.begin(), functionvalue.end(), outiter->begin(),
          differences.begin(),
          &SquaredDifference);
      x[index] = std::accumulate(differences.begin(), differences.end(), 0.);
    }
  }
}