source: src/FunctionApproximation/FunctionApproximation.hpp@ cf4905

Last change on this file since cf4905 was 69ab84, checked in by Frederik Heber <heber@…>, 12 years ago

FunctionApproximation's cstor now accepts TrainingData instance directly.

/*
 * FunctionApproximation.hpp
 *
 *  Created on: 02.10.2012
 *      Author: heber
 */

#ifndef FUNCTIONAPPROXIMATION_HPP_
#define FUNCTIONAPPROXIMATION_HPP_

// include config.h
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <vector>

#include "FunctionApproximation/FunctionModel.hpp"

class TrainingData;

/** This class encapsulates the solution to approximating a high-dimensional
 * function, represented by two vectors of tuples of input variables and
 * function outputs, via a model function that is manipulated by a set of
 * parameters.
 *
 * \note For this reason the input and output dimensions have to be given in
 * the constructor, since these are fixed parameters of the problem as a
 * whole: a different input dimension usually means we have a completely
 * different problem (and hence we may as well construct a new instance of
 * this class).
 *
 * The "training data", i.e. the two sets of input and output values, is
 * supplied separately.
 *
 * The task is then to fit the supplied high-dimensional function, the
 * "model", via its set of variable parameters. This fitting procedure is
 * executed via a Levenberg-Marquardt algorithm as implemented in the
 * <a href="http://www.ics.forth.gr/~lourakis/levmar/index.html">LevMar</a>
 * package.
 */
class FunctionApproximation
{
public:
  //!> typedef for a vector of input arguments
  typedef std::vector<FunctionModel::arguments_t> inputs_t;
  //!> typedef for a vector of output values
  typedef std::vector<FunctionModel::results_t> outputs_t;

public:
  /** Constructor of the class FunctionApproximation.
   *
   * \param _data container with tuples of (input, output) values
   * \param _model FunctionModel to use in approximation
   */
  FunctionApproximation(
      const TrainingData &_data,
      FunctionModel &_model);

  /** Constructor of the class FunctionApproximation.
   *
   * \param _input_dimension input dimension for this function approximation
   * \param _output_dimension output dimension for this function approximation
   * \param _model FunctionModel to use in approximation
   */
  FunctionApproximation(
      const size_t &_input_dimension,
      const size_t &_output_dimension,
      FunctionModel &_model) :
    input_dimension(_input_dimension),
    output_dimension(_output_dimension),
    model(_model)
  {}

  /** Destructor for class FunctionApproximation.
   *
   */
  ~FunctionApproximation()
  {}

  /** Setter for the training data to be used.
   *
   * \param input vector of input tuples, needs to be of
   *        FunctionApproximation::input_dimension size
   * \param output vector of output tuples, needs to be of
   *        FunctionApproximation::output_dimension size
   */
  void setTrainingData(const inputs_t &input, const outputs_t &output);

  /** Setter for the model function to be used in the approximation.
   *
   */
  void setModelFunction(FunctionModel &_model);

  /** This enum steers whether we use finite differences or
   * FunctionModel::parameter_derivative to calculate the Jacobian.
   *
   */
  enum JacobianMode {
    FiniteDifferences,
    ParameterDerivative,
    MAXMODE
  };
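
  /* For orientation: with FiniteDifferences each Jacobian entry is
   * approximated from additional model evaluations, roughly
   *
   *   d f_i / d p_j  ~  ( f_i(p + h * e_j) - f_i(p) ) / h
   *
   * for some small step width h (the exact scheme is the one implemented in
   * LevMar), whereas ParameterDerivative uses the analytic
   * FunctionModel::parameter_derivative and thus typically yields a more
   * accurate Jacobian at lower cost.
   */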

  /** This starts the fitting process, resulting in the parameters of
   * the model function being optimized with respect to the given training
   * data.
   *
   * \param mode whether to use finite differences or the parameter derivative
   *        in calculating the Jacobian
   */
  void operator()(const enum JacobianMode mode = FiniteDifferences);

  /** Evaluates the model function for each input tuple of the training data
   * and returns the function values as a vector.
   *
   * This function has a signature compatible with the one required by the
   * LevMar package (with double precision).
   *
   * \param *p array of parameters for the model function of dimension \a m
   * \param *x array of result values of dimension \a n
   * \param m parameter dimension
   * \param n output dimension
   * \param *data additional data, unused here
   */
  void evaluate(double *p, double *x, int m, int n, void *data);

  /** Evaluates the parameter derivative of the model function for each input
   * tuple of the training data and returns the entries of the Jacobian as a
   * vector.
   *
   * This function has a signature compatible with the one required by the
   * LevMar package (with double precision).
   *
   * \param *p array of parameters for the model function of dimension \a m
   * \param *jac on output, the Jacobian matrix of result values of dimension \a n times \a m
   * \param m parameter dimension
   * \param n output dimension times parameter dimension
   * \param *data additional data, unused here
   */
  void evaluateDerivative(double *p, double *jac, int m, int n, void *data);
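
  /* How callbacks of this shape are typically handed to LevMar's driver with
   * an analytic Jacobian (a sketch only; the actual wiring lives in the
   * implementation file, and it is assumed here that the private
   * LevMarCallback/LevMarDerivativeCallback declared below forward to
   * evaluate() and evaluateDerivative() via the void *data pointer):
   *
   *   double info[LM_INFO_SZ];
   *   int iterations = dlevmar_der(
   *       &FunctionApproximation::LevMarCallback,           // model values
   *       &FunctionApproximation::LevMarDerivativeCallback, // Jacobian
   *       p, x, m, n,   // parameters, measurements, and their dimensions
   *       1000,         // maximum number of iterations (example value)
   *       NULL,         // opts == NULL: use LevMar's default settings
   *       info,         // termination diagnostics
   *       NULL, NULL,   // work, covar: let LevMar allocate / skip covariance
   *       data);        // passed through to the callbacks
   */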

  /** This function checks whether the parameter derivative of the FunctionModel
   * has been implemented correctly by validating it against finite differences.
   *
   * We use LevMar's dlevmar_chkjac() function.
   *
   * \return true - gradients are ok (>0.5), false - otherwise
   */
  bool checkParameterDerivatives();

private:
  static void LevMarCallback(double *p, double *x, int m, int n, void *data);

  static void LevMarDerivativeCallback(double *p, double *x, int m, int n, void *data);

  void prepareModel(double *p, int m);

  void prepareParameters(double *&p, int &m) const;

  void prepareOutput(double *&x, int &n) const;

private:
  //!> input dimension (is fixed from construction)
  const size_t input_dimension;
  //!> output dimension (is fixed from construction)
  const size_t output_dimension;

  //!> current input set of training data
  inputs_t input_data;
  //!> current output set of training data
  outputs_t output_data;

  //!> the model function to be used in the high-dimensional approximation
  FunctionModel &model;
};
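
/* Example usage (a sketch only: it assumes a filled TrainingData instance and
 * a concrete FunctionModel implementation are available, both of which are
 * defined outside of this header; the fitModel() wrapper is purely
 * illustrative):
 *
 *   #include "FunctionApproximation/FunctionApproximation.hpp"
 *
 *   void fitModel(const TrainingData &data, FunctionModel &model)
 *   {
 *     FunctionApproximation approximator(data, model);
 *     // prefer the analytic Jacobian if the model implements it correctly,
 *     // otherwise fall back to finite differences
 *     if (approximator.checkParameterDerivatives())
 *       approximator(FunctionApproximation::ParameterDerivative);
 *     else
 *       approximator(FunctionApproximation::FiniteDifferences);
 *     // the optimized parameters presumably end up in the FunctionModel
 *     // instance, which is held by reference
 *   }
 */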

#endif /* FUNCTIONAPPROXIMATION_HPP_ */