import numpy as np
import math
import random
def f(x, y, a=350, b=2, f_0=110):
    """Rosenbrock-style objective: a*(x^2 - y)^2 + b*(x - 1)^2 + f_0.

    Global minimum at (x, y) = (1, 1) with value f_0.
    """
    return a * (x**2 - y)**2 + b * (x - 1)**2 + f_0
def f_vector(vector, a=350, b=2, f_0=110):
    """Evaluate f on a length-2 sequence [x, y].

    Forwards a, b, f_0 to f (the original silently ignored them).
    """
    x, y = vector
    return f(x, y, a, b, f_0)
def grad(x, y, a=350, b=2, f_0=110):
    """Analytic gradient of f = a*(x^2 - y)^2 + b*(x - 1)^2 + f_0.

    d/dx = 4*a*x*(x^2 - y) + 2*b*(x - 1)   (original dropped the b-term)
    d/dy = -2*a*(x^2 - y)                  (original had -2*a*(x - y))
    f_0 is a constant offset, so it does not appear in the gradient.
    """
    return np.array([4 * a * x * (x**2 - y) + 2 * b * (x - 1),
                     -2 * a * (x**2 - y)])
def grad_vector(vector, a=350, b=2, f_0=110):
    """Evaluate grad on a length-2 sequence [x, y].

    Forwards a, b, f_0 to grad (the original silently ignored them).
    """
    x, y = vector
    return grad(x, y, a, b, f_0)
def H(x, y, a=350, b=2, f_0=110):
    """Analytic Hessian of f as a 2x2 numpy array.

    [[d2f/dx2, d2f/dxdy],
     [d2f/dydx, d2f/dy2]]
    Note d2f/dy2 = +2*a (the original had -2*a), and the original
    np.matrix(row1, row2) call was a TypeError — rows must be nested
    in a single list; np.matrix is deprecated, so np.array is used.
    """
    return np.array([[12 * a * x**2 - 4 * a * y + 2 * b, -4 * a * x],
                     [-4 * a * x, 2 * a]])
def H_vector(vector, a=350, b=2, f_0=110):
    """Evaluate H on a length-2 sequence [x, y].

    Forwards a, b, f_0 to H (the original silently ignored them).
    """
    x, y = vector
    return H(x, y, a, b, f_0)
def conjugate_gradient(method, eps, x, max_iter=10_000):
    """Minimize f with the nonlinear conjugate-gradient method.

    method: '1' selects the Fletcher–Reeves beta; any other value uses
            Polak–Ribière (clipped at zero, which acts as an automatic
            restart to steepest descent).
    eps:    tolerance; scaled by 1/100 as in the original code.
    x:      starting point, length-2 array-like.
    max_iter: safety cap so a bad line search cannot loop forever.
    Returns the final iterate as a numpy array (the original computed a
    result but never returned it).

    NOTE(review): the original was not runnable (space in the function
    name, undefined `alpha` and `PR`, `.norm` attribute); this is a
    reconstruction of the intended algorithm — confirm against the
    assignment spec.
    """
    x = np.asarray(x, dtype=float)
    eps = eps / 100.0
    g = grad_vector(x)
    d = -g
    for _ in range(max_iter):
        if np.linalg.norm(g) <= eps:
            break
        # Backtracking line search: halve alpha until f decreases.
        alpha = 1.0
        fx = f_vector(x)
        while f_vector(x + alpha * d) > fx and alpha > 1e-12:
            alpha *= 0.5
        new_x = x + alpha * d
        new_g = grad_vector(new_x)
        gg = g @ g
        if method == '1':
            beta = (new_g @ new_g) / gg          # Fletcher–Reeves
        else:
            beta = max(0.0, new_g @ (new_g - g) / gg)  # Polak–Ribière+
        d = -new_g + beta * d
        if np.all(np.abs(new_x - x) < eps):
            x = new_x
            break
        x, g = new_x, new_g
    return x
def levenberg_marquardt(eps, x, mu=1e+3, max_iter=10_000):
    """Minimize f with a Levenberg–Marquardt (damped Newton) scheme.

    Each step solves (H + mu*I) p = grad and moves to x - p; mu is
    doubled until the step decreases f, and divided by 4 after an
    accepted step (as in the original's mu *= 2 / mu /= 4 scheme).

    eps: tolerance on the gradient norm and on the step.
    x:   starting point, length-2 array-like.
    mu:  initial damping parameter.
    Returns the final iterate as a numpy array (the original returned
    nothing).

    NOTE(review): the original body was not runnable (missing colon,
    `linalf` typo, inverse applied to the identity instead of H+mu*I,
    f() called on a vector); this reconstructs the intended method.
    """
    x = np.asarray(x, dtype=float)
    I = np.eye(2)
    for _ in range(max_iter):
        g = grad_vector(x)
        if np.linalg.norm(g) <= eps:
            break
        # Damped Newton step; solve instead of forming an explicit inverse.
        new_x = x - np.linalg.solve(H_vector(x) + mu * I, g)
        # Increase damping until the step actually decreases f.
        while f_vector(new_x) >= f_vector(x) and mu < 1e12:
            mu *= 2.0
            new_x = x - np.linalg.solve(H_vector(x) + mu * I, g)
        if np.all(np.abs(new_x - x) < eps):
            x = new_x
            break
        mu /= 4.0
        x = new_x
    return x
def devidon_fletcher_powell(eps, x, max_iter=10_000):
    """Minimize f with the Davidon–Fletcher–Powell quasi-Newton method.

    Maintains G, an approximation of the inverse Hessian, updated with
    the DFP rank-two formula:
        G += (dx dx^T)/(dx·dg) - (G dg dg^T G)/(dg·G dg)
    where dx = x_{k+1} - x_k and dg = grad_{k+1} - grad_k.

    eps: tolerance on the gradient norm and on the step.
    x:   starting point, length-2 array-like.
    Returns the final iterate as a numpy array (the original returned
    nothing).

    NOTE(review): the original was not runnable (`np.minimize` does not
    exist, missing colon, garbled update formula); this reconstructs
    the standard DFP iteration with a backtracking line search.
    """
    x = np.asarray(x, dtype=float)
    G = np.eye(2)               # inverse-Hessian approximation
    g = grad_vector(x)
    d = -G @ g
    for _ in range(max_iter):
        if np.linalg.norm(g) <= eps:
            break
        # Backtracking line search along the quasi-Newton direction.
        alpha = 1.0
        fx = f_vector(x)
        while f_vector(x + alpha * d) > fx and alpha > 1e-12:
            alpha *= 0.5
        new_x = x + alpha * d
        new_g = grad_vector(new_x)
        dx = new_x - x
        dg = new_g - g
        denom1 = dx @ dg
        Gdg = G @ dg
        denom2 = dg @ Gdg
        if abs(denom1) > 1e-12 and abs(denom2) > 1e-12:
            # DFP rank-two update of the inverse-Hessian approximation.
            G = G + np.outer(dx, dx) / denom1 - np.outer(Gdg, Gdg) / denom2
        else:
            G = np.eye(2)       # reset on numerical breakdown
        if np.all(np.abs(new_x - x) < eps):
            x = new_x
            break
        x, g = new_x, new_g
        d = -G @ g
    return x