source: src/FunctionApproximation/FunctionApproximation.hpp@ e60558

Action_Thermostats Add_AtomRandomPerturbation Add_RotateAroundBondAction Add_SelectAtomByNameAction Adding_Graph_to_ChangeBondActions Adding_MD_integration_tests Adding_StructOpt_integration_tests Automaking_mpqc_open AutomationFragmentation_failures Candidate_v1.6.0 Candidate_v1.6.1 ChangeBugEmailaddress ChangingTestPorts ChemicalSpaceEvaluator Combining_Subpackages Debian_Package_split Debian_package_split_molecuildergui_only Disabling_MemDebug Docu_Python_wait EmpiricalPotential_contain_HomologyGraph EmpiricalPotential_contain_HomologyGraph_documentation Enable_parallel_make_install Enhance_userguide Enhanced_StructuralOptimization Enhanced_StructuralOptimization_continued Example_ManyWaysToTranslateAtom Exclude_Hydrogens_annealWithBondGraph FitPartialCharges_GlobalError Fix_ChronosMutex Fix_StatusMsg Fix_StepWorldTime_single_argument Fix_Verbose_Codepatterns ForceAnnealing_goodresults ForceAnnealing_oldresults ForceAnnealing_tocheck ForceAnnealing_with_BondGraph ForceAnnealing_with_BondGraph_continued ForceAnnealing_with_BondGraph_continued_betteresults ForceAnnealing_with_BondGraph_contraction-expansion GeometryObjects Gui_displays_atomic_force_velocity IndependentFragmentGrids_IntegrationTest JobMarket_RobustOnKillsSegFaults JobMarket_StableWorkerPool JobMarket_unresolvable_hostname_fix ODR_violation_mpqc_open PartialCharges_OrthogonalSummation PythonUI_with_named_parameters QtGui_reactivate_TimeChanged_changes Recreated_GuiChecks RotateToPrincipalAxisSystem_UndoRedo StoppableMakroAction Subpackage_CodePatterns Subpackage_JobMarket Subpackage_LinearAlgebra Subpackage_levmar Subpackage_mpqc_open Subpackage_vmg ThirdParty_MPQC_rebuilt_buildsystem TremoloParser_IncreasedPrecision TremoloParser_MultipleTimesteps Ubuntu_1604_changes stable
Last change on this file since e60558 was b40690, checked in by Frederik Heber <heber@…>, 9 years ago

Fit..PotentialAction now allows setting the maximum number of optimization iterations.

  • also reduced default value to 100 instead of 1000 for speeding up tests.
  • Property mode set to 100644
File size: 7.5 KB
Line 
1/*
2 * FunctionApproximation.hpp
3 *
4 * Created on: 02.10.2012
5 * Author: heber
6 */
7
8#ifndef FUNCTIONAPPROXIMATION_HPP_
9#define FUNCTIONAPPROXIMATION_HPP_
10
11// include config.h
12#ifdef HAVE_CONFIG_H
13#include <config.h>
14#endif
15
16#include <vector>
17
18#include "FunctionApproximation/FunctionModel.hpp"
19
20class TrainingData;
21
/** This class encapsulates the solution to approximating a high-dimensional
 * function represented by two vectors of tuples, being input variables and
 * output of the function via a model function, manipulated by a set of
 * parameters.
 *
 * \note For this reason the input and output dimension has to be given in
 * the constructor since these are fixed parameters to the problem as a
 * whole and usually: a different input dimension means we have a completely
 * different problem (and hence we may as well construct and new instance of
 * this class).
 *
 * The "training data", i.e. the two sets of input and output values, is
 * given extra.
 *
 * The problem is then that a given high-dimensional function is supplied,
 * the "model", and we have to fit this function via its set of variable
 * parameters. This fitting procedure is executed via a Levenberg-Marquardt
 * algorithm as implemented in the
 * <a href="http://www.ics.forth.gr/~lourakis/levmar/index.html">LevMar</a>
 * package.
 *
 * \section FunctionApproximation-details Details on the inner workings.
 *
 * FunctionApproximation::operator() is the main function that performs the
 * non-linear regression. It consists of the following steps:
 * -# hand given (initial) parameters over to model.
 * -# convert output vector to format suitable to levmar
 * -# allocate memory for levmar to work in
 * -# depending on whether the model is constrained or not and whether we
 * have a derivative, we make use of various levmar functions with prepared
 * parameters.
 * -# memory is free'd and some final infos is given.
 *
 * levmar needs to evaluate the model. To this end, FunctionApproximation has
 * two functions whose signatures is such as to match with the one required
 * by the levmar package. Hence,
 * -# FunctionApproximation::LevMarCallback()
 * -# FunctionApproximation::LevMarDerivativeCallback()
 * are used as callbacks by levmar only.
 * These hand over the current set of parameters to the model, then both bind
 * FunctionApproximation::evaluate() and
 * FunctionApproximation::evaluateDerivative(), respectively, and execute
 * FunctionModel::operator() or FunctionModel::parameter_derivative(),
 * respectively.
 *
 */
class FunctionApproximation
{
public:
  //!> typedef for a vector of input arguments
  typedef std::vector<FunctionModel::arguments_t> inputs_t;
  //!> typedef for a vector of lists of input arguments (one filtered
  //!> argument list per training tuple, as consumed by setTrainingData())
  typedef std::vector<FunctionModel::list_of_arguments_t> filtered_inputs_t;
  //!> typedef for a vector of output values
  typedef std::vector<FunctionModel::results_t> outputs_t;
public:
  /** Constructor of the class FunctionApproximation.
   *
   * \param _data container with tuple of (input, output) values
   * \param _model FunctionModel to use in approximation
   * \param _precision desired precision of fit
   * \param _maxiterations maximum number of iterations for LevMar's optimization
   */
  FunctionApproximation(
      const TrainingData &_data,
      FunctionModel &_model,
      const double _precision,
      const unsigned int _maxiterations);

  /** Constructor of the class FunctionApproximation.
   *
   * \param _input_dimension input dimension for this function approximation
   * \param _output_dimension output dimension for this function approximation
   * \param _model FunctionModel to use in approximation
   * \param _precision desired precision of fit handed to LevMar
   * \param _maxiterations maximum number of iterations for LevMar's optimization
   */
  FunctionApproximation(
      const size_t &_input_dimension,
      const size_t &_output_dimension,
      FunctionModel &_model,
      const double _precision,
      const unsigned int _maxiterations) :
    input_dimension(_input_dimension),
    output_dimension(_output_dimension),
    precision(_precision),
    maxiterations(_maxiterations),
    model(_model)
  {}
  /** Destructor for class FunctionApproximation.
   *
   * No resources owned beyond members with their own destructors.
   */
  ~FunctionApproximation()
  {}

  /** Setter for the training data to be used.
   *
   * \param input vector of input tuples, needs to be of
   *        FunctionApproximation::input_dimension size
   * \param output vector of output tuples, needs to be of
   *        FunctionApproximation::output_dimension size
   */
  void setTrainingData(const filtered_inputs_t &input, const outputs_t &output);

  /** Setter for the model function to be used in the approximation.
   *
   * \param _model FunctionModel to fit against the training data
   */
  void setModelFunction(FunctionModel &_model);

  /** This enum steers whether we use finite differences or
   * FunctionModel::parameter_derivative to calculate the jacobian.
   *
   */
  enum JacobianMode {
    FiniteDifferences,
    ParameterDerivative,
    MAXMODE  //!< sentinel, marks the number of valid modes
  };

  /** This starts the fitting process, resulting in the parameters to
   * the model function being optimized with respect to the given training
   * data.
   *
   * \param mode whether to use finite differences or the parameter derivative
   *        in calculating the jacobian
   */
  void operator()(const enum JacobianMode mode = FiniteDifferences);

  /** Evaluates the model function for each pair of training tuple and returns
   * the output of the function as a vector.
   *
   * This function as a signature compatible to the one required by the
   * LevMar package (with double precision).
   *
   * \param *p array of parameters for the model function of dimension \a m
   * \param *x array of result values of dimension \a n
   * \param m parameter dimension
   * \param n output dimension
   * \param *data additional data, unused here
   */
  void evaluate(double *p, double *x, int m, int n, void *data);

  /** Evaluates the parameter derivative of the model function for each pair of
   * training tuple and returns the output of the function as vector.
   *
   * This function as a signature compatible to the one required by the
   * LevMar package (with double precision).
   *
   * \param *p array of parameters for the model function of dimension \a m
   * \param *jac on output jacobian matrix of result values of dimension \a n times \a m
   * \param m parameter dimension
   * \param n output dimension times parameter dimension
   * \param *data additional data, unused here
   */
  void evaluateDerivative(double *p, double *jac, int m, int n, void *data);

  /** This functions checks whether the parameter derivative of the FunctionModel
   * has been correctly implemented by validating against finite differences.
   *
   * We use LevMar's dlevmar_chkjac() function.
   *
   * \return true - gradients are ok (>0.5), false - else
   */
  bool checkParameterDerivatives();

private:
  //!> static trampoline handed to levmar; forwards to evaluate() via \a data
  //!> (which carries the FunctionApproximation instance)
  static void LevMarCallback(double *p, double *x, int m, int n, void *data);

  //!> static trampoline handed to levmar; forwards to evaluateDerivative()
  static void LevMarDerivativeCallback(double *p, double *x, int m, int n, void *data);

  //!> hands the current parameter array \a p of dimension \a m over to the model
  void prepareModel(double *p, int m);

  //!> allocates and fills the levmar parameter array \a p, sets its dimension \a m
  void prepareParameters(double *&p, int &m) const;

  //!> allocates and fills the levmar measurement array \a x from the training
  //!> output, sets its dimension \a n
  void prepareOutput(double *&x, int &n) const;

private:
  //!> input dimension (is fixed from construction)
  const size_t input_dimension;
  //!> output dimension (is fixed from construction)
  const size_t output_dimension;
  //!> desired precision given to LevMar
  const double precision;
  //!> maximum number of iterations for LevMar
  const unsigned int maxiterations;

  //!> current input set of training data
  filtered_inputs_t input_data;
  //!> current output set of training data
  outputs_t output_data;

  //!> the model function to be used in the high-dimensional approximation
  FunctionModel &model;
};
214
215#endif /* FUNCTIONAPPROXIMATION_HPP_ */
Note: See TracBrowser for help on using the repository browser.