ase/optimize/sciopt.py
import numpy as np
try:
    import scipy.optimize as opt
except ImportError:
    pass

from ase.optimize.optimize import Optimizer


class Converged(Exception):
    pass

class OptimizerConvergenceError(Exception):
    pass

class SciPyOptimizer(Optimizer):
    """General interface for SciPy optimizers

    Subclasses only need to implement the call to the optimizer itself
    (call_fmin).
    """
    def __init__(self, atoms, logfile='-', trajectory=None,
                 callback_always=False, alpha=70.0):
        """Initialize object

        Parameters:

        callback_always: bool
            Should the callback be run after each force call (also in the
            linesearch)?

        alpha: float
            Initial guess for the Hessian (curvature of the energy
            surface).  The default of 70.0 is conservative; a lower value
            may converge in fewer steps, but at the risk of instability.
        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory)
        self.force_calls = 0
        self.callback_always = callback_always
        self.H0 = alpha
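        # SciPy's minimizers implicitly start from the identity as initial
        # Hessian; dividing the energy and gradient by H0 (see f() and
        # fprime() below) is equivalent to starting from H0 * I instead.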

    def x0(self):
        """Return x0 in a way SciPy can use

        This method is mostly useful for subclasses wanting to redefine the
        parameters (and the objective function)"""
        return self.atoms.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        # Scale the problem as SciPy uses I as initial Hessian.
        return self.atoms.get_potential_energy() / self.H0

    def fprime(self, x):
        """Gradient of the objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.force_calls += 1

        if self.callback_always:
            self.callback(x)

        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        return -self.atoms.get_forces().reshape(-1) / self.H0

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as
        SciPy optimizers only call it after each iteration, while ase
        optimizers call something similar before as well.
        """
        f = self.atoms.get_forces()
        self.log(f)
        self.call_observers()
        if self.converged(f):
            raise Converged
        self.nsteps += 1

    def run(self, fmax=0.05, steps=100000000):
        self.fmax = fmax
        # As SciPy does not log the zeroth iteration, we do that manually
        self.callback(None)
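        # (callback() ignores its argument and reads the state from
        # self.atoms, so passing None here is safe.)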
        try:
            # Scale the problem as SciPy uses I as initial Hessian.
            self.call_fmin(fmax / self.H0, steps)
        except Converged:
            pass

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, fmax, steps):
        """Run the actual SciPy minimizer; implemented by subclasses."""
        raise NotImplementedError

class SciPyFminCG(SciPyOptimizer):
    """Non-linear (Polak-Ribiere) conjugate gradient algorithm"""
    def call_fmin(self, fmax, steps):
        output = opt.fmin_cg(self.f,
                             self.x0(),
                             fprime=self.fprime,
                             #args=(),
                             gtol=fmax * 0.1,  # Should never be reached;
                                               # callback() raises Converged
                             norm=np.inf,
                             #epsilon=
                             maxiter=steps,
                             full_output=1,
                             disp=0,
                             #retall=0,
                             callback=self.callback
                             )
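        # With full_output=1, fmin_cg returns (xopt, fopt, func_calls,
        # grad_calls, warnflag); warnflag == 2 signals precision loss.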
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')

class SciPyFminBFGS(SciPyOptimizer):
    """Quasi-Newton method (Broyden-Fletcher-Goldfarb-Shanno)"""
    def call_fmin(self, fmax, steps):
        output = opt.fmin_bfgs(self.f,
                               self.x0(),
                               fprime=self.fprime,
                               #args=(),
                               gtol=fmax * 0.1,  # Should never be reached
                               norm=np.inf,
                               #epsilon=1.4901161193847656e-08,
                               maxiter=steps,
                               full_output=1,
                               disp=0,
                               #retall=0,
                               callback=self.callback
                               )
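        # With full_output=1, fmin_bfgs returns (xopt, fopt, gopt, Bopt,
        # func_calls, grad_calls, warnflag); warnflag == 2 again signals
        # precision loss.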
        warnflag = output[-1]
        if warnflag == 2:
            raise OptimizerConvergenceError(
                'Warning: Desired error not necessarily achieved '
                'due to precision loss')

class SciPyGradientlessOptimizer(Optimizer):
    """General interface for gradient-free SciPy optimizers

    Subclasses only need to implement the call to the optimizer itself
    (call_fmin).

    Note: If you redefine x0() and f(), you don't even need an atoms
    object; redefining these also lets you specify an arbitrary objective
    function (see the sketch after SciPyFmin below).

    XXX: This is still a work in progress
    """
    def __init__(self, atoms, logfile='-', trajectory=None,
                 callback_always=False):
        """Parameters:

        callback_always: bool
            Should the callback be run after each function call (also in
            the linesearch)?
        """
        restart = None
        Optimizer.__init__(self, atoms, restart, logfile, trajectory)
        self.function_calls = 0
        self.callback_always = callback_always

    def x0(self):
        """Return x0 in a way SciPy can use

        This method is mostly useful for subclasses wanting to redefine the
        parameters (and the objective function)"""
        return self.atoms.get_positions().reshape(-1)

    def f(self, x):
        """Objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.function_calls += 1
        return self.atoms.get_potential_energy()

    def callback(self, x):
        """Callback function to be run after each iteration by SciPy

        This should also be called once before optimization starts, as
        SciPy optimizers only call it after each iteration, while ase
        optimizers call something similar before as well.
        """
        # We can't assume that forces are available!
        #f = self.atoms.get_forces()
        #self.log(f)
        self.call_observers()
        #if self.converged(f):
        #    raise Converged
        self.nsteps += 1

    def run(self, ftol=0.01, xtol=0.01, steps=100000000):
        self.xtol = xtol
        self.ftol = ftol
        # As SciPy does not log the zeroth iteration, we do that manually
        self.callback(None)
        try:
            self.call_fmin(xtol, ftol, steps)
        except Converged:
            pass

    def dump(self, data):
        pass

    def load(self):
        pass

    def call_fmin(self, xtol, ftol, steps):
        """Run the actual SciPy minimizer; implemented by subclasses."""
        raise NotImplementedError

class SciPyFmin(SciPyGradientlessOptimizer):
    """Nelder-Mead Simplex algorithm

    Uses only function calls.

    XXX: This is still a work in progress
    """
    def call_fmin(self, xtol, ftol, steps):
        output = opt.fmin(self.f,
                          self.x0(),
                          #args=(),
                          xtol=xtol,
                          ftol=ftol,
                          maxiter=steps,
                          #maxfun=None,
                          #full_output=1,
                          disp=0,
                          #retall=0,
                          callback=self.callback
                          )

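# Illustrative sketch (not from the original module): as noted in the
# SciPyGradientlessOptimizer docstring, redefining x0() and f() lets these
# drivers minimize an arbitrary objective.  The Rosenbrock example below is
# a hypothetical, untested illustration of that idea.
#
# class RosenbrockFmin(SciPyFmin):
#     def x0(self):
#         # Standard Rosenbrock starting point.
#         return np.array([-1.2, 1.0])
#
#     def f(self, x):
#         self.function_calls += 1
#         return (1.0 - x[0])**2 + 100.0 * (x[1] - x[0]**2)**2
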
class SciPyFminPowell(SciPyGradientlessOptimizer):
    """Powell's (modified) direction set method

    Uses only function calls.

    XXX: This is still a work in progress
    """
    def __init__(self, *args, **kwargs):
        """Parameters:

        direc: float
            Initial step size along each coordinate direction.
            Defaults to 0.04.
        """
        direc = kwargs.pop('direc', None)
        SciPyGradientlessOptimizer.__init__(self, *args, **kwargs)

        if direc is None:
            self.direc = np.eye(len(self.x0()), dtype=float) * 0.04
        else:
            self.direc = np.eye(len(self.x0()), dtype=float) * direc
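        # The initial direction set is the scaled identity matrix: one
        # search direction per degree of freedom, with step length `direc`.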

    def call_fmin(self, xtol, ftol, steps):
        output = opt.fmin_powell(self.f,
                                 self.x0(),
                                 #args=(),
                                 xtol=xtol,
                                 ftol=ftol,
                                 maxiter=steps,
                                 #maxfun=None,
                                 #full_output=1,
                                 disp=0,
                                 #retall=0,
                                 callback=self.callback,
                                 direc=self.direc
                                 )
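

# ----------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original module).
# Assumes ASE's EMT test calculator is available; any calculator works.
if __name__ == '__main__':
    from ase import Atoms
    from ase.calculators.emt import EMT

    # A slightly stretched Cu dimer; EMT is cheap enough for a quick test.
    atoms = Atoms('Cu2', positions=[(0.0, 0.0, 0.0), (0.0, 0.0, 2.7)])
    atoms.set_calculator(EMT())

    relax = SciPyFminBFGS(atoms, logfile='-')
    relax.run(fmax=0.05)
    print('force calls:', relax.force_calls)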