Changeset 5b5724


Timestamp:
Dec 19, 2012, 3:25:31 PM
Author:
Frederik Heber <heber@…>
Branches:
Action_Thermostats, Add_AtomRandomPerturbation, Add_FitFragmentPartialChargesAction, Add_RotateAroundBondAction, Add_SelectAtomByNameAction, Added_ParseSaveFragmentResults, AddingActions_SaveParseParticleParameters, Adding_Graph_to_ChangeBondActions, Adding_MD_integration_tests, Adding_ParticleName_to_Atom, Adding_StructOpt_integration_tests, AtomFragments, Automaking_mpqc_open, AutomationFragmentation_failures, Candidate_v1.5.4, Candidate_v1.6.0, Candidate_v1.6.1, ChangeBugEmailaddress, ChangingTestPorts, ChemicalSpaceEvaluator, CombiningParticlePotentialParsing, Combining_Subpackages, Debian_Package_split, Debian_package_split_molecuildergui_only, Disabling_MemDebug, Docu_Python_wait, EmpiricalPotential_contain_HomologyGraph, EmpiricalPotential_contain_HomologyGraph_documentation, Enable_parallel_make_install, Enhance_userguide, Enhanced_StructuralOptimization, Enhanced_StructuralOptimization_continued, Example_ManyWaysToTranslateAtom, Exclude_Hydrogens_annealWithBondGraph, FitPartialCharges_GlobalError, Fix_BoundInBox_CenterInBox_MoleculeActions, Fix_ChargeSampling_PBC, Fix_ChronosMutex, Fix_FitPartialCharges, Fix_FitPotential_needs_atomicnumbers, Fix_ForceAnnealing, Fix_IndependentFragmentGrids, Fix_ParseParticles, Fix_ParseParticles_split_forward_backward_Actions, Fix_PopActions, Fix_QtFragmentList_sorted_selection, Fix_Restrictedkeyset_FragmentMolecule, Fix_StatusMsg, Fix_StepWorldTime_single_argument, Fix_Verbose_Codepatterns, Fix_fitting_potentials, Fixes, ForceAnnealing_goodresults, ForceAnnealing_oldresults, ForceAnnealing_tocheck, ForceAnnealing_with_BondGraph, ForceAnnealing_with_BondGraph_continued, ForceAnnealing_with_BondGraph_continued_betteresults, ForceAnnealing_with_BondGraph_contraction-expansion, FragmentAction_writes_AtomFragments, FragmentMolecule_checks_bonddegrees, GeometryObjects, Gui_Fixes, Gui_displays_atomic_force_velocity, ImplicitCharges, IndependentFragmentGrids, IndependentFragmentGrids_IndividualZeroInstances, IndependentFragmentGrids_IntegrationTest, IndependentFragmentGrids_Sole_NN_Calculation, JobMarket_RobustOnKillsSegFaults, JobMarket_StableWorkerPool, JobMarket_unresolvable_hostname_fix, MoreRobust_FragmentAutomation, ODR_violation_mpqc_open, PartialCharges_OrthogonalSummation, PdbParser_setsAtomName, PythonUI_with_named_parameters, QtGui_reactivate_TimeChanged_changes, Recreated_GuiChecks, Rewrite_FitPartialCharges, RotateToPrincipalAxisSystem_UndoRedo, SaturateAtoms_findBestMatching, SaturateAtoms_singleDegree, StoppableMakroAction, Subpackage_CodePatterns, Subpackage_JobMarket, Subpackage_LinearAlgebra, Subpackage_levmar, Subpackage_mpqc_open, Subpackage_vmg, Switchable_LogView, ThirdParty_MPQC_rebuilt_buildsystem, TrajectoryDependenant_MaxOrder, TremoloParser_IncreasedPrecision, TremoloParser_MultipleTimesteps, TremoloParser_setsAtomName, Ubuntu_1604_changes, stable
Children:
3c1465
Parents:
1dca9a
git-author:
Frederik Heber <heber@…> (10/03/12 19:58:56)
git-committer:
Frederik Heber <heber@…> (12/19/12 15:25:31)
Message:

FunctionApproximation now uses dlevmar_der(), i.e. it additionally passes the derivative of the FunctionModel.
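
For orientation, dlevmar_der() differs from the previously used dlevmar_dif() in that it takes a second callback supplying the analytic Jacobian instead of approximating it by finite differences. The following is a minimal, self-contained sketch of that call shape only; the model, data values and names are hypothetical and not part of this changeset, and it assumes levmar's double-precision C API is available via <levmar.h>:

    // Illustration only: fit E(r) = k*(r - r0)^2 + E0 to made-up data with
    // levmar's derivative-based routine, mirroring the switch made in this changeset.
    #include <levmar.h>
    #include <cstdio>

    static const int N = 5;
    static const double r[N] = { 0.8, 0.9, 1.0, 1.1, 1.2 };   // hypothetical distances

    // model callback: x[i] = E(r[i]) for the current parameters p = (k, r0, E0)
    static void harmonic(double *p, double *x, int m, int n, void *data)
    {
      for (int i = 0; i < n; ++i)
        x[i] = p[0]*(r[i] - p[1])*(r[i] - p[1]) + p[2];
    }

    // Jacobian callback: one row of m partial derivatives per data point
    static void harmonic_jac(double *p, double *jac, int m, int n, void *data)
    {
      int j = 0;
      for (int i = 0; i < n; ++i) {
        jac[j++] = (r[i] - p[1])*(r[i] - p[1]);   // dE/dk
        jac[j++] = -2.*p[0]*(r[i] - p[1]);        // dE/dr0
        jac[j++] = 1.;                            // dE/dE0
      }
    }

    int main()
    {
      double p[3] = { 1., 0.9, 0. };                    // initial guess for (k, r0, E0)
      double x[N] = { 0.50, 0.12, 0.01, 0.13, 0.52 };   // hypothetical measured energies
      double info[LM_INFO_SZ];
      // NULL opts/work/covar: default options, levmar allocates work memory, no covariance
      const int iters = dlevmar_der(harmonic, harmonic_jac, p, x, 3, N, 1000,
                                    NULL, info, NULL, NULL, NULL);
      std::printf("%d iterations: k=%g, r0=%g, E0=%g\n", iters, p[0], p[1], p[2]);
      return 0;
    }

In the changeset itself, LevMarCallback and LevMarDerivativeCallback play the role of harmonic and harmonic_jac, forwarding to FunctionApproximation::evaluate() and evaluateDerivative() via boost::bind.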

Location:
src
Files:
6 edited

Legend:

  ' ' (space)  Unmodified
  +            Added
  -            Removed
  • src/FunctionApproximation/FunctionApproximation.cpp

    r1dca9a r5b5724  
 }
 
-
-/* Meyer's (reformulated) problem, minimum at (2.48, 6.18, 3.45) */
-void meyer(double *p, double *x, int m, int n, void *data)
-{
-register int i;
-double ui;
-
-        for(i=0; i<n; ++i){
-                ui=0.45+0.05*i;
-                x[i]=p[0]*exp(10.0*p[1]/(ui+p[2]) - 13.0);
-        }
-}
-
-void jacmeyer(double *p, double *jac, int m, int n, void *data)
-{
-register int i, j;
-double ui, tmp;
-
-  for(i=j=0; i<n; ++i){
-          ui=0.45+0.05*i;
-          tmp=exp(10.0*p[1]/(ui+p[2]) - 13.0);
-
-          jac[j++]=tmp;
-          jac[j++]=10.0*p[0]*tmp/(ui+p[2]);
-          jac[j++]=-10.0*p[0]*p[1]*tmp/((ui+p[2])*(ui+p[2]));
-  }
-}
-
 /** Callback to circumvent boost::bind, boost::function and function pointer problem.
  *
…
   function(p,x,m,n,data);
 }
+
+void FunctionApproximation::LevMarDerivativeCallback(double *p, double *x, int m, int n, void *data)
+{
+  FunctionApproximation *approximator = static_cast<FunctionApproximation *>(data);
+  ASSERT( approximator != NULL,
+      "LevMarDerivativeCallback() - received data does not represent a FunctionApproximation object.");
+  boost::function<void(double*,double*,int,int,void*)> function =
+      boost::bind(&FunctionApproximation::evaluateDerivative, approximator, _1, _2, _3, _4, _5);
+  function(p,x,m,n,data);
+}
+
 
 void FunctionApproximation::operator()()
…
     // give this pointer as additional data to construct function pointer in
     // LevMarCallback and call
-    ret=dlevmar_dif(&FunctionApproximation::LevMarCallback, p, x, m, n, 1000, opts, info, work, covar, this); // no Jacobian, caller allocates work memory, covariance estimated
+//    ret=dlevmar_dif(
+//        &FunctionApproximation::LevMarCallback,
+//        p, x, m, n, 1000, opts, info, work, covar, this); // no Jacobian, caller allocates work memory, covariance estimated
+    ret=dlevmar_der(
+        &FunctionApproximation::LevMarCallback,
+        &FunctionApproximation::LevMarDerivativeCallback,
+        p, x, m, n, 1000, opts, info, work, covar, this); // no Jacobian, caller allocates work memory, covariance estimated
 
     {
…
     free(work);
   }
-
-//  {
-//   double err[16];
-//   dlevmar_chkjac(meyer, jacmeyer, p, m, n, NULL, err);
-//   for(i=0; i<n; ++i) printf("gradient %d, err %g\n", i, err[i]);
-//  }
 
   {
…
   }
 
+  {
+   double *err = new double[n];
+   dlevmar_chkjac(
+      &FunctionApproximation::LevMarCallback,
+      &FunctionApproximation::LevMarDerivativeCallback,
+      p, m, n, this, err);
+   for(i=0; i<n; ++i)
+     LOG(1, "INFO: gradient " << i << ", err " << err[i] << ".");
+   delete[] err;
+  }
+
+
   delete[] p;
   delete[] x;
…
 }
 
+void FunctionApproximation::prepareModel(double *p, int m)
+{
+  ASSERT( (size_t)m == model.getParameterDimension(),
+      "FunctionApproximation::prepareModel() - LevMar expects "+toString(m)
+      +" parameters but the model function expects "+toString(model.getParameterDimension())+".");
+  FunctionModel::parameters_t params(m, 0.);
+  std::copy(p, p+m, params.begin());
+  model.setParameters(params);
+}
+
 void FunctionApproximation::evaluate(double *p, double *x, int m, int n, void *data)
 {
   // first set parameters
-  ASSERT( (size_t)m == model.getParameterDimension(),
-      "FunctionApproximation::evaluate() - LevMar expects "+toString(m)
-      +" parameters but the model function expects "+toString(model.getParameterDimension())+".");
+  prepareModel(p,m);
+
+  // then evaluate
   ASSERT( (size_t)n == output_data.size(),
       "FunctionApproximation::evaluate() - LevMar expects "+toString(n)
       +" outputs but we provide "+toString(output_data.size())+".");
-  FunctionModel::parameters_t params(m, 0.);
-  std::copy(p, p+m, params.begin());
-  model.setParameters(params);
-
-  // then evaluate
   if (!output_data.empty()) {
     inputs_t::const_iterator initer = input_data.begin();
     outputs_t::const_iterator outiter = output_data.begin();
     size_t index = 0;
-    for (; initer != input_data.end(); ++initer, ++outiter, ++index) {
+    for (; initer != input_data.end(); ++initer, ++outiter) {
       // result may be a vector, calculate L2 norm
       const FunctionModel::results_t functionvalue =
           model(*initer);
-      std::vector<double> differences(functionvalue.size(), 0.);
-      std::transform(
-          functionvalue.begin(), functionvalue.end(), outiter->begin(),
-          differences.begin(),
-          &SquaredDifference);
-      x[index] = std::accumulate(differences.begin(), differences.end(), 0.);
-    }
-  }
-}
+      x[index++] = functionvalue[0];
+//      std::vector<double> differences(functionvalue.size(), 0.);
+//      std::transform(
+//          functionvalue.begin(), functionvalue.end(), outiter->begin(),
+//          differences.begin(),
+//          &SquaredDifference);
+//      x[index] = std::accumulate(differences.begin(), differences.end(), 0.);
+    }
+  }
+}
+
+void FunctionApproximation::evaluateDerivative(double *p, double *jac, int m, int n, void *data)
+{
+  // first set parameters
+  prepareModel(p,m);
+
+  // then evaluate
+  ASSERT( (size_t)n == output_data.size(),
+      "FunctionApproximation::evaluateDerivative() - LevMar expects "+toString(n)
+      +" outputs but we provide "+toString(output_data.size())+".");
+  if (!output_data.empty()) {
+    inputs_t::const_iterator initer = input_data.begin();
+    outputs_t::const_iterator outiter = output_data.begin();
+    size_t index = 0;
+    for (; initer != input_data.end(); ++initer, ++outiter) {
+      // result may be a vector, calculate L2 norm
+      for (int paramindex = 0; paramindex < m; ++paramindex) {
+        const FunctionModel::results_t functionvalue =
+            model.parameter_derivative(*initer, paramindex);
+        jac[index++] = functionvalue[0];
+      }
+//      std::vector<double> differences(functionvalue.size(), 0.);
+//      std::transform(
+//          functionvalue.begin(), functionvalue.end(), outiter->begin(),
+//          differences.begin(),
+//          &SquaredDifference);
+//      x[index] = std::accumulate(differences.begin(), differences.end(), 0.);
+    }
+  }
+}
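
A note on the Jacobian layout (it follows directly from the loop order in evaluateDerivative() above, outer over data points, inner over parameters): levmar expects a dense row-major array of n rows by m columns, i.e.

    jac[i*m + j] = d model(input_i) / d p_j   // row i: data point, column j: parameter

The dlevmar_chkjac() block added to operator()() cross-checks this analytic Jacobian against a finite-difference estimate; following the MINPACK CHKDER convention that levmar adopts, err[i] values near 1.0 indicate a consistent gradient for the i-th output, while values near 0.0 point to a probably incorrect one.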
  • src/FunctionApproximation/FunctionApproximation.hpp

    r1dca9a r5b5724  
 
   /** Evaluates the model function for each pair of training tuple and returns
-   * the error between the output of the function and the expected output as a
-   * vector.
+   * the output of the function as a vector.
    *
    * This function as a signature compatible to the one required by the
…
   void evaluate(double *p, double *x, int m, int n, void *data);
 
+  /** Evaluates the parameter derivative of the model function for each pair of
+   * training tuple and returns the output of the function as vector.
+   *
+   * This function as a signature compatible to the one required by the
+   * LevMar package (with double precision).
+   *
+   * \param *p array of parameters for the model function of dimension \a m
+   * \param *jac on output jacobian matrix of result values of dimension \a n times \a m
+   * \param m parameter dimension
+   * \param n output dimension times parameter dimension
+   * \param *data additional data, unused here
+   */
+  void evaluateDerivative(double *p, double *jac, int m, int n, void *data);
+
 private:
   static void LevMarCallback(double *p, double *x, int m, int n, void *data);
+
+  static void LevMarDerivativeCallback(double *p, double *x, int m, int n, void *data);
+
+  void prepareModel(double *p, int m);
 
 private:
  • src/FunctionApproximation/FunctionArgument.hpp

    r1dca9a r5b5724  
 struct argument_t
 {
+  typedef std::pair<size_t, size_t> indices_t;
+
+  /** Default constructor for class argument_t.
+   *
+   */
+  argument_t() :
+    indices( std::make_pair(0,1) ),
+    distance(0.)
+  {}
+
+  /** Constructor for class argument_t.
+   *
+   * This constructors uses the index pair (0,1) as default.
+   *
+   * \param _distance distance argument
+   */
+  argument_t(const double &_distance) :
+    indices( std::make_pair(0,1) ),
+    distance(_distance)
+  {}
+
+  /** Constructor for class argument_t.
+   *
+   * \param _indices pair of indices associated with the \a _distance
+   * \param _distance distance argument
+   */
+  argument_t(const indices_t &_indices, const double &_distance) :
+    indices( _indices ),
+    distance(_distance)
+  {}
+
   //!> indices between which the distance is given
-  std::pair<size_t, size_t> indices;
+  indices_t indices;
   //!> distance
   double distance;
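
As a usage sketch only (not part of the changeset; the include path is assumed relative to src/), the three constructors cover the common cases:

    #include "FunctionApproximation/FunctionArgument.hpp"

    int main()
    {
      argument_t default_arg;                               // indices (0,1), distance 0.
      argument_t distance_only(1.2);                        // indices default to (0,1)
      argument_t full(argument_t::indices_t(3, 7), 1.2);    // explicit index pair and distance
      return 0;
    }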
  • src/LevMartester.cpp

    r1dca9a r5b5724  
 
   // now perform the function approximation by optimizing the model function
-  PairPotential_Harmonic harmonic(1., 2., 0.);
+  PairPotential_Harmonic harmonic(1., 2.9, -80.);
   FunctionModel &model = harmonic;
   FunctionApproximation approximator(1, 1, model);
  • src/Potentials/Specifics/PairPotential_Harmonic.cpp

    r1dca9a r5b5724  
 }
 
+PairPotential_Harmonic::results_t
+PairPotential_Harmonic::parameter_derivative(
+    const arguments_t &arguments,
+    const size_t index
+    ) const
+{
+  ASSERT( arguments.size() == 1,
+      "PairPotential_Harmonic::parameter_derivative() - requires exactly one argument.");
+  const argument_t &r_ij = arguments[0];
+  switch (index) {
+    case spring_constant:
+    {
+      const result_t result =
+                 Helpers::pow( r_ij.distance - params[equilibrium_distance], 2 );
+      return std::vector<result_t>(1, result);
+      break;
+    }
+    case equilibrium_distance:
+    {
+      const result_t result =
+          -2. * params[spring_constant]
+                 * ( r_ij.distance - params[equilibrium_distance]);
+      return std::vector<result_t>(1, result);
+      break;
+    }
+    case energy_offset:
+    {
+      const result_t result = +1.;
+      return std::vector<result_t>(1, result);
+      break;
+    }
+    default:
+      break;
+  }
+
+  return PairPotential_Harmonic::results_t(1, 0.);
+}
+
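
For reference, the three cases are the partial derivatives of a harmonic pair potential of the form (assuming the parameters map to k = spring_constant, r0 = equilibrium_distance, E0 = energy_offset, as the enum names suggest)

    E(r_ij) = k * (r_ij - r0)^2 + E0

so that

    dE/dk  = (r_ij - r0)^2
    dE/dr0 = -2 * k * (r_ij - r0)
    dE/dE0 = 1

which is exactly what the spring_constant, equilibrium_distance and energy_offset branches return; the default branch returns 0 for an out-of-range index.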
  • src/Potentials/Specifics/PairPotential_Harmonic.hpp

    r1dca9a r5b5724  
    *         input
    */
-//  results_t parameter_derivative(const arguments_t &arguments, const size_t index) const;
+  results_t parameter_derivative(const arguments_t &arguments, const size_t index) const;
 
 private: