/*
 * FunctionApproximation.hpp
 *
 *  Created on: 02.10.2012
 *      Author: heber
 */

#ifndef FUNCTIONAPPROXIMATION_HPP_
#define FUNCTIONAPPROXIMATION_HPP_

// include config.h
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <vector>

#include "FunctionApproximation/FunctionModel.hpp"

class TrainingData;

/** This class encapsulates the solution to approximating a high-dimensional
 * function, represented by two vectors of tuples (input variables and
 * function outputs), via a model function that is manipulated by a set of
 * parameters.
 *
 * \note For this reason the input and output dimensions have to be given in
 * the constructor, since these are fixed parameters of the problem as a
 * whole: a different input dimension usually means a completely different
 * problem (and hence we may as well construct a new instance of this class).
 *
 * The "training data", i.e. the two sets of input and output values, is
 * supplied separately.
 *
 * The problem is then as follows: a given high-dimensional function, the
 * "model", is supplied, and we have to fit this function via its set of
 * variable parameters. This fitting procedure is executed via a
 * Levenberg-Marquardt algorithm as implemented in the
 * <a href="http://www.ics.forth.gr/~lourakis/levmar/index.html">LevMar</a>
 * package.
 *
 */
class FunctionApproximation
{
public:
  //!> typedef for a vector of input arguments
  typedef std::vector<FunctionModel::arguments_t> inputs_t;
  //!> typedef for a vector of output values
  typedef std::vector<FunctionModel::results_t> outputs_t;
public:
  /** Constructor of the class FunctionApproximation.
   *
   * \param _data container with tuples of (input, output) values
   * \param _model FunctionModel to use in the approximation
   */
  FunctionApproximation(
      const TrainingData &_data,
      FunctionModel &_model);

  /** Constructor of the class FunctionApproximation.
   *
   * \param _input_dimension input dimension for this function approximation
   * \param _output_dimension output dimension for this function approximation
   * \param _model FunctionModel to use in the approximation
   */
  FunctionApproximation(
      const size_t &_input_dimension,
      const size_t &_output_dimension,
      FunctionModel &_model) :
    input_dimension(_input_dimension),
    output_dimension(_output_dimension),
    model(_model)
  {}
  /** Destructor for class FunctionApproximation.
   *
   */
  ~FunctionApproximation()
  {}

  /** Setter for the training data to be used.
   *
   * \param input vector of input tuples, each of which needs to be of
   * FunctionApproximation::input_dimension size
   * \param output vector of output tuples, each of which needs to be of
   * FunctionApproximation::output_dimension size
   */
  void setTrainingData(const inputs_t &input, const outputs_t &output);

  /** Setter for the model function to be used in the approximation.
   *
   */
  void setModelFunction(FunctionModel &_model);

  /** This enum steers whether we use finite differences or
   * FunctionModel::parameter_derivative to calculate the Jacobian.
   *
   */
  enum JacobianMode {
    FiniteDifferences,
    ParameterDerivative,
    MAXMODE
  };
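  // Note (added sketch, not part of the original header): with FiniteDifferences,
  // LevMar approximates each Jacobian entry itself from extra model evaluations,
  // roughly as
  //   J_ij ~ ( f_i(p + h*e_j) - f_i(p) ) / h
  // for some small step h along the j-th parameter direction, whereas
  // ParameterDerivative relies on the analytic
  // FunctionModel::parameter_derivative. The exact difference scheme used is
  // up to the LevMar implementation.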

  /** This starts the fitting process, resulting in the parameters of
   * the model function being optimized with respect to the given training
   * data.
   *
   * \param mode whether to use finite differences or the parameter derivative
   * in calculating the Jacobian
   */
  void operator()(const enum JacobianMode mode = FiniteDifferences);

  /** Evaluates the model function for each (input, output) training tuple and
   * returns the output of the function as a vector.
   *
   * This function has a signature compatible with the one required by the
   * LevMar package (with double precision).
   *
   * \param *p array of parameters for the model function, of dimension \a m
   * \param *x array of result values, of dimension \a n
   * \param m parameter dimension
   * \param n output dimension
   * \param *data additional data, unused here
   */
  void evaluate(double *p, double *x, int m, int n, void *data);

  /** Evaluates the parameter derivative of the model function for each
   * (input, output) training tuple and returns the resulting Jacobian.
   *
   * This function has a signature compatible with the one required by the
   * LevMar package (with double precision).
   *
   * \param *p array of parameters for the model function, of dimension \a m
   * \param *jac on output, Jacobian matrix of the result values, of dimension \a n times \a m
   * \param m parameter dimension
   * \param n output dimension (the Jacobian has \a n times \a m entries)
   * \param *data additional data, unused here
   */
  void evaluateDerivative(double *p, double *jac, int m, int n, void *data);
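  // Note (added, following LevMar's documented convention; not part of the
  // original header): the Jacobian is expected in jac as an n x m matrix in
  // row-major order, i.e. jac[i*m + j] holds the derivative of the i-th output
  // component with respect to the j-th parameter.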

  /** This function checks whether the parameter derivative of the FunctionModel
   * has been implemented correctly by validating it against finite differences.
   *
   * We use LevMar's dlevmar_chkjac() function.
   *
   * \return true - gradients are ok (>0.5), false - else
   */
  bool checkParameterDerivatives();
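  // Note (added sketch of the underlying check; the details are an assumption
  // based on how dlevmar_chkjac() is commonly described): dlevmar_chkjac()
  // compares the analytic Jacobian against a finite-difference estimate and
  // yields a per-component quality measure between 0 and 1, where values close
  // to 1 indicate a correct derivative; the ">0.5" above is the threshold
  // applied to these measures.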

private:
  static void LevMarCallback(double *p, double *x, int m, int n, void *data);

  static void LevMarDerivativeCallback(double *p, double *x, int m, int n, void *data);

  void prepareModel(double *p, int m);

  void prepareParameters(double *&p, int &m) const;

  void prepareOutput(double *&x, int &n) const;

private:
  //!> input dimension (fixed at construction)
  const size_t input_dimension;
  //!> output dimension (fixed at construction)
  const size_t output_dimension;

  //!> current input set of training data
  inputs_t input_data;
  //!> current output set of training data
  outputs_t output_data;

  //!> the model function to be used in the high-dimensional approximation
  FunctionModel &model;
};
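/* Usage sketch (added; not part of the original header). The fit minimizes the
 * least-squares error over all training tuples (x_i, y_i),
 *   sum_i || model(x_i; p) - y_i ||^2,
 * with respect to the parameter vector p. The concrete model type MyModel and
 * the way TrainingData is filled are hypothetical placeholders here:
 *
 *   MyModel model;                                   // some FunctionModel implementation
 *   TrainingData data(...);                          // filled with (input, output) tuples
 *   FunctionApproximation approximator(data, model);
 *   approximator(FunctionApproximation::ParameterDerivative);  // run the fit
 *   // the fit adjusts the model's parameters in place (cf. operator() above)
 */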

#endif /* FUNCTIONAPPROXIMATION_HPP_ */