# ase/optimize/sciopt.py
import numpy as np

# SciPy is an optional dependency: importing this module must not fail
# without it, so failure is deferred until an optimizer is actually used.
try:
    import scipy.optimize as opt
except ImportError:
    pass

from ase.optimize.optimize import Optimizer


class Converged(Exception):
    pass


class OptimizerConvergenceError(Exception):
    pass
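
# Converged is raised from the optimizers' callback() once ASE's own
# convergence criterion is met and is caught again in run(); this is how
# ASE stops SciPy's internal iteration loop.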


class SciPyOptimizer(Optimizer):
    """General interface for SciPy optimizers.

    Subclasses only need to implement the call to the specific optimizer.
    """
    def __init__(self, atoms, logfile='-', trajectory=None,
                 callback_always=False, alpha=70.0):
        """Initialize object

        Parameters:

        callback_always: bool
            Should the callback be run after each force call (also in the
            line search)?

        alpha: float
            Initial guess for the Hessian (curvature of the energy
            surface).  A conservative value of 70.0 is the default, but
            fewer steps may be needed to converge with a lower value.
            A lower value, however, also increases the risk of
            instability.
        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory)
        self.force_calls = 0
        self.callback_always = callback_always
        self.H0 = alpha

    def x0(self):
        """Return x0 in a way SciPy can use.

        This method is mostly useful for subclasses wanting to redefine
        the parameters (and the objective function)."""
        return self.atoms.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use by the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        # Scale the problem as SciPy uses I as initial Hessian.
        return self.atoms.get_potential_energy() / self.H0

    def fprime(self, x):
        """Gradient of the objective function for use by the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.force_calls += 1

        if self.callback_always:
            self.callback(x)

        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        return -self.atoms.get_forces().reshape(-1) / self.H0
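
    # Why f() and fprime() divide by H0: a quasi-Newton step is -B^{-1}g,
    # and SciPy starts from B = I.  Scaling the energy and its gradient by
    # 1 / H0 makes that identity initial Hessian behave like B = H0 * I in
    # the unscaled problem, without moving the minimum.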

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as
        SciPy optimizers only call it after each iteration, while ASE
        optimizers call something similar before as well.
        """
        f = self.atoms.get_forces()
        self.log(f)
        self.call_observers()
        if self.converged(f):
            raise Converged
        self.nsteps += 1

    def run(self, fmax=0.05, steps=100000000):
        self.fmax = fmax
        # As SciPy does not log the zeroth iteration, we do that manually.
        self.callback(None)
        try:
            # Scale fmax by 1 / H0 to match the scaled gradient.
            self.call_fmin(fmax / self.H0, steps)
        except Converged:
            pass

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, fmax, steps):
        raise NotImplementedError


class SciPyFminCG(SciPyOptimizer):
    """Non-linear (Polak-Ribiere) conjugate gradient algorithm"""
    def call_fmin(self, fmax, steps):
        output = opt.fmin_cg(self.f,
                             self.x0(),
                             fprime=self.fprime,
                             # args=(),
                             gtol=fmax * 0.1,  # Should never be reached
                             norm=np.inf,
                             # epsilon=
                             maxiter=steps,
                             full_output=1,
                             disp=0,
                             # retall=0,
                             callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')
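
# Example usage (a sketch, assuming `atoms` is an Atoms object with a
# calculator attached):
#
#     relax = SciPyFminCG(atoms, trajectory='relax.traj')
#     relax.run(fmax=0.05)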


class SciPyFminBFGS(SciPyOptimizer):
    """Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno)"""
    def call_fmin(self, fmax, steps):
        output = opt.fmin_bfgs(self.f,
                               self.x0(),
                               fprime=self.fprime,
                               # args=(),
                               gtol=fmax * 0.1,  # Should never be reached
                               norm=np.inf,
                               # epsilon=1.4901161193847656e-08,
                               maxiter=steps,
                               full_output=1,
                               disp=0,
                               # retall=0,
                               callback=self.callback)
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')
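
# Example (a sketch): a lower initial Hessian guess `alpha` may converge
# in fewer steps on well-behaved systems, at some risk of instability:
#
#     relax = SciPyFminBFGS(atoms, alpha=30.0)
#     relax.run(fmax=0.05)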


class SciPyGradientlessOptimizer(Optimizer):
    """General interface for gradient-free SciPy optimizers.

    Subclasses only need to implement the call to the specific optimizer.

    Note: If you redefine x0() and f(), you don't even need an atoms
    object.  Redefining these also allows you to specify an arbitrary
    objective function.

    XXX: This is still a work in progress
    """
    def __init__(self, atoms, logfile='-', trajectory=None,
                 callback_always=False):
        """Parameters:

        callback_always: bool
            Should the callback be run after each force call (also in the
            line search)?
        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory)
        self.function_calls = 0
        self.callback_always = callback_always

    def x0(self):
        """Return x0 in a way SciPy can use.

        This method is mostly useful for subclasses wanting to redefine
        the parameters (and the objective function)."""
        return self.atoms.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use by the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.function_calls += 1
        # Unlike SciPyOptimizer.f(), no scaling is needed here: these
        # optimizers do not build a Hessian.
        return self.atoms.get_potential_energy()

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as
        SciPy optimizers only call it after each iteration, while ASE
        optimizers call something similar before as well.
        """
        # We can't assume that forces are available!
        # f = self.atoms.get_forces()
        # self.log(f)
        self.call_observers()
        # if self.converged(f):
        #     raise Converged
        self.nsteps += 1

    def run(self, ftol=0.01, xtol=0.01, steps=100000000):
        self.xtol = xtol
        self.ftol = ftol
        # As SciPy does not log the zeroth iteration, we do that manually.
        self.callback(None)
        try:
            self.call_fmin(xtol, ftol, steps)
        except Converged:
            pass

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, xtol, ftol, steps):
        raise NotImplementedError


class SciPyFmin(SciPyGradientlessOptimizer):
    """Nelder-Mead Simplex algorithm

    Uses only function calls.

    XXX: This is still a work in progress
    """
    def call_fmin(self, xtol, ftol, steps):
        output = opt.fmin(self.f,
                          self.x0(),
                          # args=(),
                          xtol=xtol,
                          ftol=ftol,
                          maxiter=steps,
                          # maxfun=None,
                          # full_output=1,
                          disp=0,
                          # retall=0,
                          callback=self.callback)
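
# Example (a sketch; useful when the calculator can provide energies but
# not forces):
#
#     relax = SciPyFmin(atoms)
#     relax.run(xtol=0.01, ftol=0.01)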


class SciPyFminPowell(SciPyGradientlessOptimizer):
    """Powell's (modified) direction set method

    Uses only function calls.

    XXX: This is still a work in progress
    """
    def __init__(self, *args, **kwargs):
        """Parameters:

        direc: float
            How much to change x initially.  Defaults to 0.04.
        """
        direc = kwargs.pop('direc', None)
        SciPyGradientlessOptimizer.__init__(self, *args, **kwargs)

        if direc is None:
            self.direc = np.eye(len(self.x0()), dtype=float) * 0.04
        else:
            self.direc = np.eye(len(self.x0()), dtype=float) * direc

    def call_fmin(self, xtol, ftol, steps):
        output = opt.fmin_powell(self.f,
                                 self.x0(),
                                 # args=(),
                                 xtol=xtol,
                                 ftol=ftol,
                                 maxiter=steps,
                                 # maxfun=None,
                                 # full_output=1,
                                 disp=0,
                                 # retall=0,
                                 callback=self.callback,
                                 direc=self.direc)
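
# Example (a sketch): a smaller initial step along each search direction:
#
#     relax = SciPyFminPowell(atoms, direc=0.01)
#     relax.run(xtol=0.001, ftol=0.001)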