# ase/optimize/bfgslinesearch.py @ revision 3
#__docformat__ = "restructuredtext en"
# ******NOTICE***************
# optimize.py module by Travis E. Oliphant
#
# You may copy and use this module as you see fit with no
# guarantee implied provided you keep this notice in all copies.
# *****END NOTICE************

import numpy as np
from numpy import atleast_1d, eye, mgrid, argmin, zeros, shape, empty, \
     squeeze, vectorize, asarray, absolute, sqrt, Inf, asfarray, isinf
from ase.utils.linesearch import LineSearch
from ase.optimize.optimize import Optimizer
from numpy import arange


# These have been copied from Numeric's MLab.py
# I don't think they made the transition to scipy_core

# Modified from scipy_optimize
abs = absolute
import __builtin__
pymin = __builtin__.min
pymax = __builtin__.max
__version__="0.1"

class BFGSLineSearch(Optimizer):
    def __init__(self, atoms, restart=None, logfile='-', maxstep=.2,
                 trajectory=None, c1=.23, c2=0.46, alpha=10., stpmax=50.):
        """Minimize a function using the BFGS algorithm.

        Notes:

            Optimize the function, f, whose gradient is given by fprime
            using the quasi-Newton method of Broyden, Fletcher, Goldfarb,
            and Shanno (BFGS). See Wright and Nocedal, 'Numerical
            Optimization', 1999, pg. 198.

        *See Also*:

          scikits.openopt : SciKit which offers a unified syntax to call
                            this and other solvers.
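
        Example (a minimal sketch; the EMT calculator and the Cu dimer
        below are illustrative assumptions, not anything this module
        requires)::

            from ase import Atoms
            from ase.calculators.emt import EMT
            from ase.optimize.bfgslinesearch import BFGSLineSearch

            atoms = Atoms('Cu2', positions=[(0., 0., 0.), (0., 0., 2.7)])
            atoms.set_calculator(EMT())
            opt = BFGSLineSearch(atoms, logfile=None)
            opt.run(fmax=0.05)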

        """
        self.maxstep = maxstep
        self.stpmax = stpmax
        self.alpha = alpha
        self.H = None
        self.c1 = c1
        self.c2 = c2
        self.force_calls = 0
        self.function_calls = 0
        self.r0 = None
        self.g0 = None
        self.e0 = None
        self.load_restart = False
        self.task = 'START'
        self.rep_count = 0
        self.p = None
        self.alpha_k = None
        self.no_update = False
        self.replay = False

        Optimizer.__init__(self, atoms, restart, logfile, trajectory)

    def read(self):
        self.r0, self.g0, self.e0, self.task, self.H = self.load()
        self.load_restart = True

    def reset(self):
        print 'reset'
        self.H = None
        self.r0 = None
        self.g0 = None
        self.e0 = None
        self.rep_count = 0


    def step(self, f):
        atoms = self.atoms
        r = atoms.get_positions()
        r = r.reshape(-1)
        g = -f.reshape(-1) / self.alpha
        #g = -f.reshape(-1)
        p0 = self.p
        self.update(r, g, self.r0, self.g0, p0)
        e = atoms.get_potential_energy() / self.alpha
        #e = self.func(r)

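        # Quasi-Newton search direction: p = -H g, where H is the current
        # approximation to the inverse Hessian maintained by update().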
        self.p = -np.dot(self.H,g)
        p_size = np.sqrt((self.p **2).sum())
        if self.nsteps != 0:
            p0_size = np.sqrt((p0 **2).sum())
            delta_p = self.p/p_size + p0/p0_size
        if p_size <= np.sqrt(len(atoms) * 1e-10):
            self.p /= (p_size / np.sqrt(len(atoms)*1e-10))
        ls = LineSearch()
        self.alpha_k, e, self.e0, self.no_update = \
           ls._line_search(self.func, self.fprime, r, self.p, g, e, self.e0,
                           maxstep=self.maxstep, c1=self.c1,
                           c2=self.c2, stpmax=self.stpmax)
        #if alpha_k is None:  # line search failed try different one.
        #    alpha_k, fc, gc, e, e0, gfkp1 = \
        #             line_search(self.func, self.fprime,r,p,g,
        #                         e,self.e0)
        #if abs(e - self.e0) < 0.000001:
        #    self.rep_count += 1
        #else:
        #    self.rep_count = 0

        #if (alpha_k is None) or (self.rep_count >= 3):
        #    # If the line search fails, reset the Hessian matrix and
        #    # start a new line search.
        #    self.reset()
        #    return

        dr = self.alpha_k * self.p
        atoms.set_positions((r+dr).reshape(len(atoms),-1))
        self.r0 = r
        self.g0 = g
        self.dump((self.r0, self.g0, self.e0, self.task, self.H))

    def update(self, r, g, r0, g0, p0):
        self.I = eye(len(self.atoms) * 3, dtype=int)
        if self.H is None:
            self.H = eye(3 * len(self.atoms))
            #self.H = eye(3 * len(self.atoms)) / self.alpha
            return
        else:
            dr = r - r0
            dg = g - g0
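            # Only update H when the line search returned a positive step and
            # the magnitude of the directional derivative along p0 decreased
            # (a safeguard that helps keep H positive definite), or when
            # replaying an old trajectory.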
            if not ((self.alpha_k > 0 and abs(np.dot(g,p0))-abs(np.dot(g0,p0)) < 0) \
                or self.replay):
                return
            if self.no_update == True:
                print 'skip update'
                return

            try: # this was handled in numeric; keep it here for extra safety
                rhok = 1.0 / (np.dot(dg,dr))
            except ZeroDivisionError:
                rhok = 1000.0
                print "Divide-by-zero encountered: rhok assumed large"
            if isinf(rhok): # patch for numpy, which returns inf instead of raising
                rhok = 1000.0
                print "Divide-by-zero encountered: rhok assumed large"
            A1 = self.I - dr[:, np.newaxis] * dg[np.newaxis, :] * rhok
            A2 = self.I - dg[:, np.newaxis] * dr[np.newaxis, :] * rhok
            H0 = self.H
            self.H = np.dot(A1, np.dot(self.H, A2)) + rhok * dr[:, np.newaxis] \
                     * dr[np.newaxis, :]
            #self.B = np.linalg.inv(self.H)
            #omega, V = np.linalg.eigh(self.B)
            #eighfile = open('eigh.log','w')

    def func(self, x):
        """Objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.function_calls += 1
        # Scale the problem as SciPy uses I as initial Hessian.
        return self.atoms.get_potential_energy() / self.alpha
        #return self.atoms.get_potential_energy()

    def fprime(self, x):
        """Gradient of the objective function for use of the optimizers"""
        self.atoms.set_positions(x.reshape(-1, 3))
        self.force_calls += 1
        # Remember that forces are minus the gradient!
        # Scale the problem as SciPy uses I as initial Hessian.
        return - self.atoms.get_forces().reshape(-1) / self.alpha
        #return - self.atoms.get_forces().reshape(-1)

    def replay_trajectory(self, traj):
        """Initialize the Hessian from an old trajectory."""
        self.replay = True
        if isinstance(traj, str):
            from ase.io.trajectory import PickleTrajectory
            traj = PickleTrajectory(traj, 'r')
        atoms = traj[0]
        r0 = None
        g0 = None
        for i in range(0, len(traj) - 1):
            r = traj[i].get_positions().ravel()
            g = - traj[i].get_forces().ravel() / self.alpha
            self.update(r, g, r0, g0, self.p)
            self.p = -np.dot(self.H,g)
            r0 = r.copy()
            g0 = g.copy()
        self.r0 = r0
        self.g0 = g0
        #self.r0 = traj[-2].get_positions().ravel()
        #self.g0 = - traj[-2].get_forces().ravel()

def wrap_function(function, args):
    ncalls = [0]
    def function_wrapper(x):
        ncalls[0] += 1
        return function(x, *args)
    return ncalls, function_wrapper

def _cubicmin(a,fa,fpa,b,fb,c,fc):
    # finds the minimizer for a cubic polynomial that goes through the
    #  points (a,fa), (b,fb), and (c,fc) with derivative at a of fpa.
    #
    # if no minimizer can be found return None
    #
    # f(x) = A *(x-a)^3 + B*(x-a)^2 + C*(x-a) + D

    C = fpa
    D = fa
    db = b-a
    dc = c-a
    if (db == 0) or (dc == 0) or (b==c): return None
    denom = (db*dc)**2 * (db-dc)
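    # C and D are fixed by the value and slope at a; solve the 2x2 linear
    # system given by f(b) = fb and f(c) = fc for the remaining
    # coefficients A and B.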
    d1 = empty((2,2))
    d1[0,0] = dc**2
    d1[0,1] = -db**2
    d1[1,0] = -dc**3
    d1[1,1] = db**3
    [A,B] = np.dot(d1,asarray([fb-fa-C*db,fc-fa-C*dc]).flatten())
    A /= denom
    B /= denom
    radical = B*B-3*A*C
    if radical < 0:  return None
    if (A == 0): return None
    xmin = a + (-B + sqrt(radical))/(3*A)
    return xmin

def _quadmin(a,fa,fpa,b,fb):
    # finds the minimizer for a quadratic polynomial that goes through
    #  the points (a,fa), (b,fb) with derivative at a of fpa
    # f(x) = B*(x-a)^2 + C*(x-a) + D
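    # C and D come from the slope and value at a; B follows from requiring
    # f(b) = fb, and must be positive for the parabola to have a minimum.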
    D = fa
    C = fpa
    db = b-a*1.0
    if (db==0): return None
    B = (fb-D-C*db)/(db*db)
    if (B <= 0): return None
    xmin = a  - C / (2.0*B)
    return xmin

def zoom(a_lo, a_hi, phi_lo, phi_hi, derphi_lo,
         phi, derphi, phi0, derphi0, c1, c2):
    maxiter = 10
    i = 0
    delta1 = 0.2  # cubic interpolant check
    delta2 = 0.1  # quadratic interpolant check
    phi_rec = phi0
    a_rec = 0
    while 1:
        # interpolate to find a trial step length between a_lo and a_hi
        # Need to choose interpolation here.  Use cubic interpolation, then
        # if the result is within delta * dalpha of an end point or outside
        # of the interval bounded by a_lo and a_hi, use quadratic
        # interpolation; if the result is still too close, use bisection.

        dalpha = a_hi-a_lo
        if dalpha < 0: a,b = a_hi,a_lo
        else: a,b = a_lo, a_hi

        # minimizer of cubic interpolant
        #    (uses phi_lo, derphi_lo, phi_hi, and the most recent value of phi)
        #      if the result is too close to the end points (or out of the
        #         interval) then use quadratic interpolation with phi_lo,
        #         derphi_lo and phi_hi
        #      if the result is still too close to the end points (or out of
        #         the interval) then use bisection

        if (i > 0):
            cchk = delta1*dalpha
            a_j = _cubicmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi, a_rec,
                            phi_rec)
        if (i==0) or (a_j is None) or (a_j > b-cchk) or (a_j < a+cchk):
            qchk = delta2*dalpha
            a_j = _quadmin(a_lo, phi_lo, derphi_lo, a_hi, phi_hi)
            if (a_j is None) or (a_j > b-qchk) or (a_j < a+qchk):
                a_j = a_lo + 0.5*dalpha
#                print "Using bisection."
#            else: print "Using quadratic."
#        else: print "Using cubic."

        # Check new value of a_j

        phi_aj = phi(a_j)
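        # Sufficient-decrease (Armijo) check: if phi(a_j) is not low enough,
        # or not below phi_lo, the acceptable step lies between a_lo and a_j,
        # so shrink the interval from the high end.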
        if (phi_aj > phi0 + c1*a_j*derphi0) or (phi_aj >= phi_lo):
            phi_rec = phi_hi
            a_rec = a_hi
            a_hi = a_j
            phi_hi = phi_aj
        else:
            derphi_aj = derphi(a_j)
            if abs(derphi_aj) <= -c2*derphi0:
                a_star = a_j
                val_star = phi_aj
                valprime_star = derphi_aj
                break
            if derphi_aj*(a_hi - a_lo) >= 0:
                phi_rec = phi_hi
                a_rec = a_hi
                a_hi = a_lo
                phi_hi = phi_lo
            else:
                phi_rec = phi_lo
                a_rec = a_lo
            a_lo = a_j
            phi_lo = phi_aj
            derphi_lo = derphi_aj
        i += 1
        if (i > maxiter):
            a_star = a_j
            val_star = phi_aj
            valprime_star = None
            break
    return a_star, val_star, valprime_star

def line_search(f, myfprime, xk, pk, gfk, old_fval, old_old_fval,
                args=(), c1=1e-4, c2=0.9, amax=50):
    """Find alpha that satisfies strong Wolfe conditions.

    Parameters:

        f : callable f(x,*args)
            Objective function.
        myfprime : callable f'(x,*args)
            Objective function gradient (can be None).
        xk : ndarray
            Starting point.
        pk : ndarray
            Search direction.
        gfk : ndarray
            Gradient value for x=xk (xk being the current parameter
            estimate).
        old_fval : float
            Function value at x=xk.
        old_old_fval : float
            Function value at the point preceding xk (used to guess the
            initial step length).
        args : tuple
            Additional arguments passed to objective function.
        c1 : float
            Parameter for Armijo condition rule.
        c2 : float
            Parameter for curvature condition rule.

    Returns:

        alpha_star : float or None
            Alpha for which ``x_new = x0 + alpha * pk``.
        fc : int
            Number of function evaluations made.
        gc : int
            Number of gradient evaluations made.
        fval_star : float
            Function value at the new point.
        old_fval : float
            Function value at the starting point.
        fprime_star : ndarray or None
            Gradient at the new point, if it was evaluated.

    Notes:

        Uses the line search algorithm to enforce strong Wolfe
        conditions.  See Wright and Nocedal, 'Numerical Optimization',
        1999, pg. 59-60.

        For the zoom phase it uses an algorithm by [...].
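
        Example (an illustrative sketch on a one-dimensional quadratic;
        the function and starting values below are made up for
        demonstration)::

            import numpy as np

            def f(x):
                return float(np.dot(x, x))

            def fprime(x):
                return 2 * x

            xk = np.array([3.0])
            pk = -fprime(xk)
            alpha, fc, gc, fval, old_fval, gval = line_search(
                f, fprime, xk, pk, fprime(xk), f(xk), f(xk) + 1.0)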

    """

    global _ls_fc, _ls_gc, _ls_ingfk
    _ls_fc = 0
    _ls_gc = 0
    _ls_ingfk = None
    def phi(alpha):
        global _ls_fc
        _ls_fc += 1
        return f(xk+alpha*pk,*args)

    if isinstance(myfprime,type(())):
        def phiprime(alpha):
            global _ls_fc, _ls_ingfk
            _ls_fc += len(xk)+1
            eps = myfprime[1]
            fprime = myfprime[0]
            newargs = (f,eps) + args
            _ls_ingfk = fprime(xk+alpha*pk,*newargs)  # store for later use
            return np.dot(_ls_ingfk,pk)
    else:
        fprime = myfprime
        def phiprime(alpha):
            global _ls_gc, _ls_ingfk
            _ls_gc += 1
            _ls_ingfk = fprime(xk+alpha*pk,*args)  # store for later use
            return np.dot(_ls_ingfk,pk)


    alpha0 = 0
    phi0 = old_fval
    derphi0 = np.dot(gfk,pk)

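    # Initial trial step length: guess that the first step gives a decrease
    # comparable to the previous one, alpha1 ~ 2*(phi0 - old_old_fval)/derphi0,
    # capped at 1.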
    alpha1 = pymin(1.,1.01*2*(phi0-old_old_fval)/derphi0)

    if alpha1 == 0:
        # This shouldn't happen. Perhaps the increment has slipped below
        # machine precision?  For now, set the return variables, skip the
        # useless while loop, and raise warnflag=2 due to possible imprecision.
        alpha_star = None
        fval_star = old_fval
        old_fval = old_old_fval
        fprime_star = None

    phi_a1 = phi(alpha1)
    #derphi_a1 = phiprime(alpha1)  evaluated below

    phi_a0 = phi0
    derphi_a0 = derphi0

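    # Bracketing phase: keep enlarging the step until the strong Wolfe
    # conditions are met directly, or an interval containing an acceptable
    # step is bracketed, in which case zoom() refines it.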
    i = 1
    maxiter = 10
    while 1:         # bracketing phase
        if alpha1 == 0:
            break
        if (phi_a1 > phi0 + c1*alpha1*derphi0) or \
           ((phi_a1 >= phi_a0) and (i > 1)):
            alpha_star, fval_star, fprime_star = \
                        zoom(alpha0, alpha1, phi_a0,
                             phi_a1, derphi_a0, phi, phiprime,
                             phi0, derphi0, c1, c2)
            break

        derphi_a1 = phiprime(alpha1)
        if (abs(derphi_a1) <= -c2*derphi0):
            alpha_star = alpha1
            fval_star = phi_a1
            fprime_star = derphi_a1
            break

        if (derphi_a1 >= 0):
            alpha_star, fval_star, fprime_star = \
                        zoom(alpha1, alpha0, phi_a1,
                             phi_a0, derphi_a1, phi, phiprime,
                             phi0, derphi0, c1, c2)
            break

        alpha2 = 2 * alpha1   # increase by factor of two on each iteration
        i = i + 1
        alpha0 = alpha1
        alpha1 = alpha2
        phi_a0 = phi_a1
        phi_a1 = phi(alpha1)
        derphi_a0 = derphi_a1

        # stopping test if a lower function value was not found
        if (i > maxiter):
            alpha_star = alpha1
            fval_star = phi_a1
            fprime_star = None
            break

    if fprime_star is not None:
        # fprime_star is a number (derphi), so use the most recently
        # calculated gradient that went into computing it (derphi = gfk*pk).
        # This is the gradient at the next step; there is no need to compute
        # it again in the outer loop.
        fprime_star = _ls_ingfk

    return alpha_star, _ls_fc, _ls_gc, fval_star, old_fval, fprime_star