# -*- coding: utf-8 -*-
import sys
import numpy as np
from ase.optimize.optimize import Optimizer
from ase.utils.linesearch import LineSearch


class LBFGS(Optimizer):
    """Limited memory BFGS optimizer.

    A limited-memory version of the BFGS algorithm.  Unlike the BFGS
    algorithm used in bfgs.py, the inverse of the Hessian matrix is
    updated.  The inverse Hessian is represented only as a diagonal
    matrix, to save memory.

    """
    def __init__(self, atoms, restart=None, logfile='-', trajectory=None,
                 maxstep=None, memory=100, damping=1.0, alpha=70.0,
                 use_line_search=False):
        """
        Parameters:

        restart: string
            Pickle file used to store the vectors needed to update the
            inverse Hessian matrix.  If set, a file with that name will
            be searched for, and the information stored in it will be
            used, if the file exists.

        logfile: string
            Where output should go.  None for no output, '-' for stdout.

        trajectory: string
            Pickle file used to store the trajectory of atomic movement.

        maxstep: float
            How far a single atom is allowed to move.  This is useful
            for DFT calculations where wavefunctions can be reused if
            the steps are small.  Default is 0.04 Angstrom.

        memory: int
            Number of steps to be stored.  Default value is 100.  Three
            numpy arrays of this length containing floats are stored.

        damping: float
            The calculated step is multiplied by this number before it
            is added to the positions.

        alpha: float
            Initial guess for the Hessian (curvature of the energy
            surface).  A conservative value of 70.0 is the default, but
            fewer steps may be needed to converge if a lower value is
            used.  However, a lower value also means a risk of
            instability.

        """
        Optimizer.__init__(self, atoms, restart, logfile, trajectory)

        if maxstep is not None:
            if maxstep > 1.0:
                raise ValueError('You are using a much too large value for '
                                 'the maximum step size: %.1f Angstrom'
                                 % maxstep)
            self.maxstep = maxstep
        else:
            self.maxstep = 0.04

        self.memory = memory
        # Initial approximation of the inverse Hessian; 1. / 70. is to
        # emulate the behaviour of BFGS.  Note that this is never
        # changed!
        self.H0 = 1. / alpha
        self.damping = damping
        self.use_line_search = use_line_search
        self.p = None
        self.function_calls = 0
        self.force_calls = 0

    def initialize(self):
        """Initialize everything so no checks have to be done in step."""
        self.iteration = 0
        self.s = []
        self.y = []
        # Also store rho, to avoid calculating the dot product again
        # and again.
        self.rho = []

        self.r0 = None
        self.f0 = None
        self.e0 = None
        self.task = 'START'
        self.load_restart = False

    def read(self):
        """Load saved arrays to reconstruct the Hessian."""
        self.iteration, self.s, self.y, self.rho, \
            self.r0, self.f0, self.e0, self.task = self.load()
        self.load_restart = True

    def step(self, f):
        """Take a single step.

        Use the given forces, update the history, calculate the next
        step -- then take it."""
        r = self.atoms.get_positions()

        self.update(r, f, self.r0, self.f0)

        s = self.s
        y = self.y
        rho = self.rho
        H0 = self.H0

        loopmax = np.min([self.memory, self.iteration])
        a = np.empty((loopmax,), dtype=np.float64)
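
        # The algorithm itself: the standard L-BFGS two-loop recursion
        # (see e.g. Nocedal & Wright, "Numerical Optimization").  With
        # s_i the stored position differences, y_i the stored gradient
        # differences and rho_i = 1 / (y_i . s_i), it applies the
        # current inverse-Hessian approximation to the gradient q
        # without ever building the full matrix:
        #
        #   backward loop:  a_i = rho_i * (s_i . q);  q -= a_i * y_i
        #   scaling:        z = H0 * q
        #   forward loop:   b_i = rho_i * (y_i . z);  z += s_i * (a_i - b_i)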
        q = -f.reshape(-1)
        for i in range(loopmax - 1, -1, -1):
            a[i] = rho[i] * np.dot(s[i], q)
            q -= a[i] * y[i]
        z = H0 * q

        for i in range(loopmax):
            b = rho[i] * np.dot(y[i], z)
            z += s[i] * (a[i] - b)

        self.p = -z.reshape((-1, 3))

        g = -f
        if self.use_line_search:
            e = self.func(r)
            self.line_search(r, g, e)
            dr = (self.alpha_k * self.p).reshape(len(self.atoms), -1)
        else:
            self.force_calls += 1
            self.function_calls += 1
            dr = self.determine_step(self.p) * self.damping
        self.atoms.set_positions(r + dr)

        self.iteration += 1
        self.r0 = r
        self.f0 = -g
        self.dump((self.iteration, self.s, self.y,
                   self.rho, self.r0, self.f0, self.e0, self.task))

    def determine_step(self, dr):
        """Determine the step to take according to maxstep.

        If the largest per-atom step exceeds maxstep, rescale all steps
        by the same factor.  This way we still move along the
        eigendirection.
        """
        steplengths = (dr**2).sum(1)**0.5
        longest_step = np.max(steplengths)
        if longest_step >= self.maxstep:
            dr *= self.maxstep / longest_step
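            # Example: with maxstep = 0.04 Angstrom and a longest
            # per-atom step of 0.1 Angstrom, the whole step is scaled
            # by 0.4, preserving its direction.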

        return dr

    def update(self, r, f, r0, f0):
        """Update everything that is kept in memory.

        This function is mostly here to allow for replay_trajectory.
        """
        if self.iteration > 0:
            s0 = r.reshape(-1) - r0.reshape(-1)
            self.s.append(s0)

            # We use the gradient, which is minus the force!
            y0 = f0.reshape(-1) - f.reshape(-1)
            self.y.append(y0)

            rho0 = 1.0 / np.dot(y0, s0)
            self.rho.append(rho0)
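            # Note: 1.0 / np.dot(y0, s0) is only a meaningful positive
            # scaling when the curvature condition y0 . s0 > 0 holds;
            # the plain L-BFGS update assumes this.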

        if self.iteration > self.memory:
            self.s.pop(0)
            self.y.pop(0)
            self.rho.pop(0)

    def replay_trajectory(self, traj):
        """Initialize the history from an old trajectory."""
        if isinstance(traj, str):
            from ase.io.trajectory import PickleTrajectory
            traj = PickleTrajectory(traj, 'r')
        r0 = None
        f0 = None
        # The last element is not added, as we get that for free when
        # taking the first qn-step after the replay.
        for i in range(0, len(traj) - 1):
            r = traj[i].get_positions()
            f = traj[i].get_forces()
            self.update(r, f, r0, f0)
            r0 = r.copy()
            f0 = f.copy()
            self.iteration += 1
        self.r0 = r0
        self.f0 = f0
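
        # Typical use (sketch): warm-start a new optimizer from a
        # previous run, assuming a trajectory file 'old.traj' (name
        # purely illustrative) exists:
        #
        #   opt = LBFGS(atoms)
        #   opt.replay_trajectory('old.traj')
        #   opt.run(fmax=0.05)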

    def func(self, x):
        """Objective function for use by the optimizers."""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.function_calls += 1
        return self.atoms.get_potential_energy()

    def fprime(self, x):
        """Gradient of the objective function for use by the optimizers."""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.force_calls += 1
        # Remember that forces are minus the gradient!
        return -self.atoms.get_forces().reshape(-1)

    def line_search(self, r, g, e):
        self.p = self.p.ravel()
        p_size = np.sqrt((self.p**2).sum())
        if p_size <= np.sqrt(len(self.atoms) * 1e-10):
            self.p /= (p_size / np.sqrt(len(self.atoms) * 1e-10))
        g = g.ravel()
        r = r.ravel()
        ls = LineSearch()
        self.alpha_k, e, self.e0, self.no_update = \
            ls._line_search(self.func, self.fprime, r, self.p, g, e,
                            self.e0, maxstep=self.maxstep, c1=.23,
                            c2=.46, stpmax=50.)
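
        # c1 and c2 above are the sufficient-decrease and curvature
        # parameters of the (strong) Wolfe conditions targeted by the
        # line search: with phi(alpha) = E(r + alpha * p),
        #
        #   phi(alpha) <= phi(0) + c1 * alpha * phi'(0)
        #   |phi'(alpha)| <= c2 * |phi'(0)|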


class LBFGSLineSearch(LBFGS):
    """This optimizer uses the LBFGS algorithm, but does a line search
    that fulfills the Wolfe conditions.
    """

    def __init__(self, *args, **kwargs):
        kwargs['use_line_search'] = True
        LBFGS.__init__(self, *args, **kwargs)

# """Modified version of LBFGS.
#
# This optimizer uses the LBFGS algorithm, but does a line search for
# the minimum along the search direction.  This is done by issuing an
# additional force call for each step, thus doubling the number of
# calculations.
#
# Additionally, the Hessian is reset if the new guess is not
# sufficiently better than the old one.
# """
# def __init__(self, *args, **kwargs):
#     self.dR = kwargs.pop('dR', 0.1)
#     LBFGS.__init__(self, *args, **kwargs)
#
# def update(self, r, f, r0, f0):
#     """Update everything that is kept in memory
#
#     This function is mostly here to allow for replay_trajectory.
#     """
#     if self.iteration > 0:
#         a1 = abs(np.dot(f.reshape(-1), f0.reshape(-1)))
#         a2 = np.dot(f0.reshape(-1), f0.reshape(-1))
#         if not (a1 <= 0.5 * a2 and a2 != 0):
#             # Reset optimization
#             self.initialize()
#
#     # Note that the reset above will set self.iteration to 0 again,
#     # which is why we should check again.
#     if self.iteration > 0:
#         s0 = r.reshape(-1) - r0.reshape(-1)
#         self.s.append(s0)
#
#         # We use the gradient, which is minus the force!
#         y0 = f0.reshape(-1) - f.reshape(-1)
#         self.y.append(y0)
#
#         rho0 = 1.0 / np.dot(y0, s0)
#         self.rho.append(rho0)
#
#     if self.iteration > self.memory:
#         self.s.pop(0)
#         self.y.pop(0)
#         self.rho.pop(0)
#
# def determine_step(self, dr):
#     f = self.atoms.get_forces()
#
#     # Unit vector along the search direction
#     du = dr / np.sqrt(np.dot(dr.reshape(-1), dr.reshape(-1)))
#
#     # We keep the old step determination before we figure
#     # out what is the best to do.
#     maxstep = self.maxstep * np.sqrt(3 * len(self.atoms))
#
#     # Finite-difference step using a temporary point
#     self.atoms.positions += (du * self.dR)
#     # Decide how much to move along the line du
#     Fp1 = np.dot(f.reshape(-1), du.reshape(-1))
#     Fp2 = np.dot(self.atoms.get_forces().reshape(-1), du.reshape(-1))
#     CR = (Fp1 - Fp2) / self.dR
#     # RdR = Fp1 * 0.1
#     if CR < 0.0:
#         # print "negcurve"
#         RdR = maxstep
#         # if abs(RdR) > maxstep:
#         #     RdR = self.sign(RdR) * maxstep
#     else:
#         Fp = (Fp1 + Fp2) * 0.5
#         RdR = Fp / CR
#         if abs(RdR) > maxstep:
#             RdR = np.sign(RdR) * maxstep
#         else:
#             RdR += self.dR * 0.5
#     return du * RdR


class HessLBFGS(LBFGS):
    """Backwards compatibility class."""
    def __init__(self, *args, **kwargs):
        if 'method' in kwargs:
            del kwargs['method']
        sys.stderr.write('Please use LBFGS instead of HessLBFGS!')
        LBFGS.__init__(self, *args, **kwargs)


class LineLBFGS(LBFGSLineSearch):
    """Backwards compatibility class."""
    def __init__(self, *args, **kwargs):
        if 'method' in kwargs:
            del kwargs['method']
        sys.stderr.write('Please use LBFGSLineSearch instead of LineLBFGS!')
        LBFGSLineSearch.__init__(self, *args, **kwargs)
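

# ----------------------------------------------------------------------
# Minimal usage sketch, assuming ASE's EMT test calculator is available;
# any other calculator is used the same way.  The geometry below is an
# arbitrary, slightly stretched Cu dimer chosen purely for illustration.
if __name__ == '__main__':
    from ase import Atoms
    from ase.calculators.emt import EMT

    atoms = Atoms('Cu2', positions=[(0., 0., 0.), (0., 0., 2.7)])
    atoms.set_calculator(EMT())

    opt = LBFGS(atoms, maxstep=0.04, memory=100, trajectory='cu2.traj')
    opt.run(fmax=0.05)  # relax until all force components < 0.05 eV/Angstrom

    # The line-search variant is a drop-in replacement:
    #   opt = LBFGSLineSearch(atoms)
    #   opt.run(fmax=0.05)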