# NOTE: lines of extraction residue (a garbled copy of the header and a column
# of stray line numbers 1-105) were removed here; the real module starts below.
import numpy as np
import math
import random
def f(x, y, a=350, b=2, f_0=110):
    """Shifted Rosenbrock-style objective: a*(x**2 - y)**2 + b*(x - 1)**2 + f_0.

    The minimum is at (x, y) = (1, 1), where f == f_0.

    Args:
        x, y: scalar coordinates of the evaluation point.
        a, b: curvature coefficients of the two quadratic terms.
        f_0: constant offset added to the objective value.

    Returns:
        The scalar objective value.
    """
    return a * (x**2 - y)**2 + b * (x - 1)**2 + f_0
def f_vector(vector, a=350, b=2, f_0=110):
    """Evaluate f at a 2-element point ``vector == [x, y]``.

    Bug fix: the original discarded its ``a``, ``b`` and ``f_0`` arguments
    and always evaluated f with the defaults; they are now forwarded.

    Args:
        vector: sequence of two scalars (x, y).
        a, b, f_0: passed through to :func:`f`.

    Returns:
        The scalar objective value f(x, y).
    """
    x, y = vector
    return f(x, y, a=a, b=b, f_0=f_0)
def grad(x, y, a=350, b=2, f_0=110):
    """Analytic gradient of f(x, y) = a*(x**2 - y)**2 + b*(x - 1)**2 + f_0.

    Bug fixes versus the original:
      * d/dx was missing the ``2*b*(x - 1)`` term from the second summand;
      * d/dy was ``-2*a*(x - y)`` but must be ``-2*a*(x**2 - y)``.
    ``f_0`` is a constant and does not appear in the gradient.

    Returns:
        numpy array [df/dx, df/dy].
    """
    return np.array([4 * a * x * (x**2 - y) + 2 * b * (x - 1),
                     -2 * a * (x**2 - y)])
def grad_vector(vector, a=350, b=2, f_0=110):
    """Gradient of f at a 2-element point ``vector == [x, y]``.

    Bug fix: the original discarded its ``a``, ``b`` and ``f_0`` arguments;
    they are now forwarded to :func:`grad`.

    Returns:
        numpy array [df/dx, df/dy].
    """
    x, y = vector
    return grad(x, y, a=a, b=b, f_0=f_0)
def H(x, y, a=350, b=2, f_0=110):
    """Analytic Hessian of f(x, y) = a*(x**2 - y)**2 + b*(x - 1)**2 + f_0.

    Bug fixes versus the original:
      * ``np.matrix([row1], [row2])`` passed the second row as the dtype
        argument (a TypeError); the rows now form one nested list, and the
        deprecated ``np.matrix`` is replaced by ``np.array``;
      * the (2, 2) entry was ``-2*a`` but d2f/dy2 = +2*a.

    Returns:
        2x2 numpy array [[d2f/dx2, d2f/dxdy], [d2f/dydx, d2f/dy2]].
    """
    return np.array([[12 * a * x**2 - 4 * a * y + 2 * b, -4 * a * x],
                     [-4 * a * x, 2 * a]])
def H_vector(vector, a=350, b=2, f_0=110):
    """Hessian of f at a 2-element point ``vector == [x, y]``.

    Bug fix: the original discarded its ``a``, ``b`` and ``f_0`` arguments;
    they are now forwarded to :func:`H`.

    Returns:
        2x2 numpy array (see :func:`H`).
    """
    x, y = vector
    return H(x, y, a=a, b=b, f_0=f_0)
def conjugate_gradient(method, eps, x, max_iter=10000):
    """Minimize f with the nonlinear conjugate-gradient method.

    Rewrite of a non-compiling original (identifier contained a space,
    ``alpha`` and ``PR`` were undefined, ``.norm`` is not an array
    attribute, and the function never returned). The original branched on
    ``method == '1'``; ``PR`` presumably meant Polak-Ribiere — TODO confirm
    against the caller.

    Args:
        method: '1' selects the Fletcher-Reeves beta; any other value
            selects Polak-Ribiere (with the usual max(0, .) restart).
        eps: convergence tolerance on both the gradient norm and the step.
        x: starting point, any 2-element sequence.
        max_iter: safety cap on iterations (new, backward-compatible).

    Returns:
        numpy array with the approximate minimizer.
    """
    def _step_size(point, direction):
        # Backtracking (Armijo) line search: halve alpha until the
        # sufficient-decrease condition holds or alpha underflows.
        fx = f_vector(point)
        slope = grad_vector(point) @ direction
        alpha = 1.0
        while (f_vector(point + alpha * direction) > fx + 1e-4 * alpha * slope
               and alpha > 1e-12):
            alpha *= 0.5
        return alpha

    x = np.asarray(x, dtype=float)
    g = grad_vector(x)
    d = -g  # initial direction: steepest descent
    for _ in range(max_iter):
        if np.linalg.norm(g) <= eps:
            break
        alpha = _step_size(x, d)
        new_x = x + alpha * d
        new_g = grad_vector(new_x)
        if method == '1':
            # Fletcher-Reeves beta.
            beta = (new_g @ new_g) / (g @ g)
        else:
            # Polak-Ribiere beta, clipped at 0 (automatic restart).
            beta = max(0.0, new_g @ (new_g - g) / (g @ g))
        if np.all(np.abs(new_x - x) < eps):
            x = new_x
            break
        d = -new_g + beta * d
        x, g = new_x, new_g
    return x
def levenberg_marquardt(eps, x, mu=1e+3, max_iter=10000):
    """Minimize f with a Levenberg-Marquardt (damped Newton) iteration.

    Rewrite of a non-compiling original (``if`` with no comparison, ``f``
    called with a vector, ``new_x = []`` evaluated through ``f``, and a
    garbage linear-algebra chain). The step solves
    ``(H + mu*I) p = grad`` and the original's damping schedule is kept:
    double ``mu`` until the step decreases f, then quarter it after a
    successful step.

    Args:
        eps: convergence tolerance on both the gradient norm and the step.
        x: starting point, any 2-element sequence.
        mu: initial damping parameter.
        max_iter: safety cap on iterations (new, backward-compatible).

    Returns:
        numpy array with the approximate minimizer.
    """
    x = np.asarray(x, dtype=float)
    for _ in range(max_iter):
        g = grad_vector(x)
        if np.linalg.norm(g) <= eps:
            break
        while True:
            A = np.asarray(H_vector(x)) + mu * np.eye(2)
            # ravel() tolerates H_vector returning np.matrix or np.array.
            new_x = x - np.asarray(np.linalg.solve(A, g)).ravel()
            if f_vector(new_x) < f_vector(x) or mu > 1e12:
                break
            mu *= 2.0  # step failed: increase damping (more gradient-like)
        mu /= 4.0  # step accepted: relax damping (more Newton-like)
        if np.all(np.abs(new_x - x) < eps):
            x = new_x
            break
        x = new_x
    return x
def devidon_fletcher_powell(eps, x, max_iter=10000):
    """Minimize f with the Davidon-Fletcher-Powell quasi-Newton method.

    (The canonical spelling is "Davidon"; the original name is kept so
    existing callers keep working.) Rewrite of a non-compiling original
    (``if`` with no comparison, nonexistent ``np.minimize`` / ``.norm`` /
    ``.getT``, and ``np.array(dx, dg)`` where dot products were intended).
    The inverse-Hessian estimate G gets the standard DFP rank-two update:

        G += outer(dx, dx)/(dx.dg) - outer(G@dg, G@dg)/(dg.G@dg)

    Args:
        eps: convergence tolerance on both the gradient norm and the step.
        x: starting point, any 2-element sequence.
        max_iter: safety cap on iterations (new, backward-compatible).

    Returns:
        numpy array with the approximate minimizer.
    """
    def _step_size(point, direction):
        # Backtracking (Armijo) line search along `direction`.
        fx = f_vector(point)
        slope = grad_vector(point) @ direction
        alpha = 1.0
        while (f_vector(point + alpha * direction) > fx + 1e-4 * alpha * slope
               and alpha > 1e-12):
            alpha *= 0.5
        return alpha

    x = np.asarray(x, dtype=float)
    G = np.eye(2)  # running estimate of the inverse Hessian
    g = grad_vector(x)
    for _ in range(max_iter):
        if np.linalg.norm(g) <= eps:
            break
        d = -(G @ g)
        alpha = _step_size(x, d)
        new_x = x + alpha * d
        new_g = grad_vector(new_x)
        dx = new_x - x
        dg = new_g - g
        if np.all(np.abs(dx) < eps):
            x = new_x
            break
        # DFP update; skip when a denominator is ~0 (degenerate step).
        denom_x = dx @ dg
        Gdg = G @ dg
        denom_g = dg @ Gdg
        if abs(denom_x) > 1e-12 and abs(denom_g) > 1e-12:
            G = G + np.outer(dx, dx) / denom_x - np.outer(Gdg, Gdg) / denom_g
        x, g = new_x, new_g
    return x