source: ThirdParty/mpqc_open/src/lib/math/optimize/qnewton.cc@ 7516f6

//
// qnewton.cc
//
// Copyright (C) 1996 Limit Point Systems, Inc.
//
// Author: Curtis Janssen <cljanss@limitpt.com>
// Maintainer: LPS
//
// This file is part of the SC Toolkit.
//
// The SC Toolkit is free software; you can redistribute it and/or modify
// it under the terms of the GNU Library General Public License as published by
// the Free Software Foundation; either version 2, or (at your option)
// any later version.
//
// The SC Toolkit is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Library General Public License for more details.
//
// You should have received a copy of the GNU Library General Public License
// along with the SC Toolkit; see the file COPYING.LIB. If not, write to
// the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
//
// The U.S. Government is granted a limited license as per AL 91-7.
//

#ifdef __GNUC__
#pragma implementation
#endif

#include <math.h>
#include <float.h>

#include <util/state/stateio.h>
#include <math/optimize/qnewton.h>
#include <util/keyval/keyval.h>
#include <util/misc/formio.h>

using namespace std;
using namespace sc;

/////////////////////////////////////////////////////////////////////////
// QNewtonOpt

static ClassDesc QNewtonOpt_cd(
  typeid(QNewtonOpt),"QNewtonOpt",2,"public Optimize",
  0, create<QNewtonOpt>, create<QNewtonOpt>);
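// (class version 2: the StateIn constructor below checks
// s.version(...) > 1 before restoring the print flags)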

QNewtonOpt::QNewtonOpt(const Ref<KeyVal>&keyval):
  Optimize(keyval)
{

  if (function_.null()) {
    ExEnv::err0() << "QNewtonOpt requires a function keyword" << endl;
    abort();
  }

  init();

  update_ << keyval->describedclassvalue("update");
  if (update_.nonnull()) update_->set_inverse();
  lineopt_ << keyval->describedclassvalue("lineopt");
  accuracy_ = keyval->doublevalue("accuracy");
  if (keyval->error() != KeyVal::OK) accuracy_ = 0.0001;
  print_x_ = keyval->booleanvalue("print_x");
  print_hessian_ = keyval->booleanvalue("print_hessian");
  print_gradient_ = keyval->booleanvalue("print_gradient");
  linear_ = keyval->booleanvalue("linear");
  if (keyval->error() != KeyVal::OK) linear_ = 0;
  restrict_ = keyval->booleanvalue("restrict");
  if (keyval->error() != KeyVal::OK) restrict_ = 1;
  dynamic_grad_acc_ = keyval->booleanvalue("dynamic_grad_acc");
  if (keyval->error() != KeyVal::OK) dynamic_grad_acc_ = 1;
  force_search_ = keyval->booleanvalue("force_search");
  if (keyval->error() != KeyVal::OK) force_search_ = 0;
  restart_ = keyval->booleanvalue("restart");
  if (keyval->error() != KeyVal::OK) restart_ = 1;

  RefSymmSCMatrix hessian(dimension(),matrixkit());
  // get a guess hessian from the function
  function()->guess_hessian(hessian);

  // see if any hessian matrix elements have been given in the input
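  // (elements are addressed as hessian:<i>:<j>; only the lower triangle
  // is read because the matrix is symmetric)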
  if (keyval->exists("hessian")) {
    int n = hessian.n();
    for (int i=0; i<n; i++) {
      if (keyval->exists("hessian",i)) {
        for (int j=0; j<=i; j++) {
          double tmp = keyval->doublevalue("hessian",i,j);
          if (keyval->error() == KeyVal::OK) hessian(i,j) = tmp;
        }
      }
    }
  }
  ihessian_ = function()->inverse_hessian(hessian);
}
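
// A minimal sketch of an input fragment that would construct this object
// through the KeyVal constructor above (the object names, values, and the
// BFGSUpdate/Backtrack choices are illustrative, not prescribed here):
//
//   opt<QNewtonOpt>: (
//     function = $:mole            // the Function being minimized
//     update<BFGSUpdate>: ()       // quasi-Newton update of ihessian_
//     lineopt<Backtrack>: ()       // optional line search
//     accuracy = 1e-4              // initial desired gradient accuracy
//     restrict = yes               // scale steps longer than max_stepsize
//     dynamic_grad_acc = yes       // tighten accuracy as gradients shrink
//   )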

QNewtonOpt::QNewtonOpt(StateIn&s):
  SavableState(s),
  Optimize(s)
{
  ihessian_ = matrixkit()->symmmatrix(dimension());
  ihessian_.restore(s);
  update_ << SavableState::restore_state(s);
  s.get(accuracy_);
  s.get(take_newton_step_);
  s.get(maxabs_gradient);
  if (s.version(::class_desc<QNewtonOpt>()) > 1) {
    s.get(print_hessian_);
    s.get(print_x_);
    s.get(print_gradient_);
  }
  else {
    print_hessian_ = 0;
    print_x_ = 0;
    print_gradient_ = 0;
  }
  lineopt_ << SavableState::restore_state(s);
}

QNewtonOpt::~QNewtonOpt()
{
}

void
QNewtonOpt::save_data_state(StateOut&s)
{
  Optimize::save_data_state(s);
  ihessian_.save(s);
  SavableState::save_state(update_.pointer(),s);
  s.put(accuracy_);
  s.put(take_newton_step_);
  s.put(maxabs_gradient);
  s.put(print_hessian_);
  s.put(print_x_);
  s.put(print_gradient_);
  SavableState::save_state(lineopt_.pointer(),s);
}

void
QNewtonOpt::init()
{
  Optimize::init();
  take_newton_step_ = 1;
  maxabs_gradient = -1.0;
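  // (a negative value marks "no previous gradient"; see the
  // old_maxabs_gradient check at the top of update())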
}

int
QNewtonOpt::update()
{
  // these are good candidates to be input options
  const double maxabs_gradient_to_desired_accuracy = 0.05;
  const double maxabs_gradient_to_next_desired_accuracy = 0.005;
  const double roundoff_error_factor = 1.1;
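  // e.g. with maxabs_gradient = 1.0e-3, the current gradient must be
  // accurate to 5.0e-5 and the next one is requested to 5.0e-6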

  // the gradient convergence criterion.
  double old_maxabs_gradient = maxabs_gradient;
  RefSCVector xcurrent;
  RefSCVector gcurrent;

  if( dynamic_grad_acc_ ) {
    // get the next gradient at the required level of accuracy.
    // usually only one pass is needed, unless we happen to find
    // that the accuracy was set too low.
    int accurate_enough;
    do {
      // compute the current point
      function()->set_desired_gradient_accuracy(accuracy_);
      xcurrent = function()->get_x();
      gcurrent = function()->gradient().copy();

      // compute the gradient convergence criterion now so we can see if
      // the accuracy needs to be tighter
      maxabs_gradient = gcurrent.maxabs();
      // compute the required accuracy
      accuracy_ = maxabs_gradient * maxabs_gradient_to_desired_accuracy;

      if (accuracy_ < DBL_EPSILON) accuracy_ = DBL_EPSILON;

      // The roundoff_error_factor is thrown in to allow for round off making
      // the current gcurrent.maxabs() a bit smaller than the previous,
      // which would make the current required accuracy less than the
      // gradient's actual accuracy and cause everything to be recomputed.
      accurate_enough = (
        function()->actual_gradient_accuracy()
          <= accuracy_*roundoff_error_factor);

      if (!accurate_enough) {
        ExEnv::out0().unsetf(ios::fixed);
        ExEnv::out0() << indent <<
          "NOTICE: function()->actual_gradient_accuracy() > accuracy_:\n"
          << indent
          << scprintf(
               "  function()->actual_gradient_accuracy() = %15.8e",
               function()->actual_gradient_accuracy()) << endl << indent
          << scprintf(
               "  accuracy_ = %15.8e",
               accuracy_) << endl;
      }
    } while(!accurate_enough);
    // tighten the accuracy, since the next gradient will be smaller
    accuracy_ = maxabs_gradient * maxabs_gradient_to_next_desired_accuracy;
  }
  else {
    xcurrent = function()->get_x();
    gcurrent = function()->gradient().copy();
  }

  if (old_maxabs_gradient >= 0.0 && old_maxabs_gradient < maxabs_gradient) {
    ExEnv::out0() << indent
      << scprintf("NOTICE: maxabs_gradient increased from %8.4e to %8.4e",
                  old_maxabs_gradient, maxabs_gradient) << endl;
  }

  // update the hessian
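  // (update_ was flagged with set_inverse() in the constructor, so it
  // works directly on the inverse hessian ihessian_)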
  if(update_.nonnull())
    update_->update(ihessian_,function(),xcurrent,gcurrent);

  conv_->reset();
  conv_->get_grad(function());
  conv_->get_x(function());

  // compute the quadratic step
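  //   xdisp = -H^{-1} g   (the full quasi-Newton step)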
  RefSCVector xdisp = -1.0*(ihessian_ * gcurrent);
  RefSCVector xnext = xcurrent + xdisp;

  // either do a lineopt or check stepsize
  double tot;
  if(lineopt_.nonnull()) {
    if (dynamic_cast<Backtrack*>(lineopt_.pointer()) != 0) {
      // The Backtrack line search is a special case.

      // perform a search
      double factor;
      if( n_iterations_ == 0 && force_search_ )
        factor = lineopt_->set_decrease_factor(1.0);
      lineopt_->init(xdisp,function());
      // reset the value accuracy here so line search "precomputes" are
      // accurate enough for subsequent gradient evaluations
      function()->set_desired_value_accuracy(accuracy_/100);
      int acceptable = lineopt_->update();
      if( n_iterations_ == 0 && force_search_ )
        lineopt_->set_decrease_factor( factor );

      if( !acceptable ) {
        if( force_search_ ) factor = lineopt_->set_decrease_factor(1.0);

        // try a new guess hessian
        if( restart_ ) {
          ExEnv::out0() << endl << indent <<
            "Restarting Hessian approximation" << endl;
          RefSymmSCMatrix hessian(dimension(),matrixkit());
          function()->guess_hessian(hessian);
          ihessian_ = function()->inverse_hessian(hessian);
          xdisp = -1.0 * (ihessian_ * gcurrent);
          lineopt_->init(xdisp,function());
          acceptable = lineopt_->update();
        }

        // try steepest descent direction
        if( !acceptable ) {
          ExEnv::out0() << endl << indent <<
            "Trying steepest descent direction." << endl;
          xdisp = -1.0 * gcurrent;
          lineopt_->init(xdisp,function());
          acceptable = lineopt_->update();
        }

        // give up and use steepest descent step
        if( !acceptable ) {
          ExEnv::out0() << endl << indent <<
            "Resorting to unscaled steepest descent step." << endl;
          function()->set_x(xcurrent + xdisp);
          Ref<NonlinearTransform> t = function()->change_coordinates();
          apply_transform(t);
        }

        if( force_search_ ) lineopt_->set_decrease_factor( factor );
      }
    }
    else {
      // All line searches other than Backtrack use this
      ExEnv::out0() << indent
                    << "......................................."
                    << endl
                    << indent
                    << "Starting line optimization."
                    << endl;
      lineopt_->init(xdisp,function());
      int maxlineopt = 3;
      for (int ilineopt=0; ilineopt<maxlineopt; ilineopt++) {
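        // note: this local maxabs_gradient shadows the member of the
        // same name for the duration of the loop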
        double maxabs_gradient = function()->gradient()->maxabs();

        int converged = lineopt_->update();

        ExEnv::out0() << indent
                      << "Completed line optimization step " << ilineopt+1
                      << (converged?" (converged)":" (not converged)")
                      << endl
                      << indent
                      << "......................................."
                      << endl;

        if (converged) break;

        // Improve accuracy, since we might be able to reuse the next
        // gradient for the next quasi-Newton step.
        if (dynamic_grad_acc_) {
          accuracy_ = maxabs_gradient*maxabs_gradient_to_next_desired_accuracy;
          function()->set_desired_gradient_accuracy(accuracy_);
        }
      }
    }
    xnext = function()->get_x();
    xdisp = xnext - xcurrent;
    tot = sqrt(xdisp.scalar_product(xdisp));
  }
  else {

    tot = sqrt(xdisp.scalar_product(xdisp));

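    // if the quasi-Newton step is longer than max_stepsize_, restrict_
    // scales it back (a simple trust-radius-style control)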
    if ( tot > max_stepsize_ ) {
      if( restrict_ ) {
        double scal = max_stepsize_/tot;
        ExEnv::out0() << endl << indent <<
          scprintf("stepsize of %f is too big, scaling by %f",tot,scal)
          << endl;
        xdisp.scale(scal);
        tot *= scal;
      }
      else {
        ExEnv::out0() << endl << indent <<
          scprintf("stepsize of %f is too big, but scaling is disabled",tot)
          << endl;
      }
    }
    xnext = xcurrent + xdisp;
  }

  if (print_hessian_) {
    RefSymmSCMatrix hessian = ihessian_.gi();
    ExEnv::out0() << indent << "hessian = [" << endl;
    ExEnv::out0() << incindent;
    int n = hessian.n();
    for (int i=0; i<n; i++) {
      ExEnv::out0() << indent << "[";
      for (int j=0; j<=i; j++) {
        ExEnv::out0() << scprintf(" % 10.6f",double(hessian(i,j)));
      }
      ExEnv::out0() << " ]" << endl;
    }
    ExEnv::out0() << decindent;
    ExEnv::out0() << indent << "]" << endl;
  }
  if (print_x_) {
    int n = xcurrent.n();
    ExEnv::out0() << indent << "x = [";
    for (int i=0; i<n; i++) {
      ExEnv::out0() << scprintf(" % 16.12f",double(xcurrent(i)));
    }
    ExEnv::out0() << " ]" << endl;
  }
  if (print_gradient_) {
    int n = gcurrent.n();
    ExEnv::out0() << indent << "gradient = [";
    for (int i=0; i<n; i++) {
      ExEnv::out0() << scprintf(" % 16.12f",double(gcurrent(i)));
    }
    ExEnv::out0() << " ]" << endl;
  }

  // check for convergence
  conv_->set_nextx(xnext);
  int converged = conv_->converged();
  if (converged) return converged;

  ExEnv::out0() << indent
    << scprintf("taking step of size %f", tot) << endl;
  ExEnv::out0() << indent << "Optimization iteration "
    << n_iterations_ + 1 << " complete" << endl;
  ExEnv::out0() << indent
    << "//////////////////////////////////////////////////////////////////////"
    << endl;

  if( lineopt_.null() ) {
    function()->set_x(xnext);
    Ref<NonlinearTransform> t = function()->change_coordinates();
    apply_transform(t);
  }

  if( dynamic_grad_acc_ )
    function()->set_desired_gradient_accuracy(accuracy_);

  return converged;
}

void
QNewtonOpt::apply_transform(const Ref<NonlinearTransform> &t)
{
  if (t.null()) return;
  Optimize::apply_transform(t);
  if (lineopt_.nonnull()) lineopt_->apply_transform(t);
  if (ihessian_.nonnull()) t->transform_ihessian(ihessian_);
  if (update_.nonnull()) update_->apply_transform(t);
}

/////////////////////////////////////////////////////////////////////////////

// Local Variables:
// mode: c++
// c-file-style: "ETS"
// End: