import numpy as np
from scipy.optimize import minimize_scalar
from sympy import Symbol, hessian

n = 2  # problem dimension

# Objective: a shifted Rosenbrock-type function,
# f(x, y) = 158*(x^2 - y)^2 + 2*(x - 1)^2 + 40 for n = 2.
def f(x, n=2, a=158, b=2, f0=40):
    return sum(a * (x[i] ** 2 - x[i + 1]) ** 2 + b * (x[i] - 1) ** 2
               for i in range(n - 1)) + f0
# Analytic gradient of f for n = 2.
def grad(x, a=158, b=2):
    return np.array([4 * a * x[0] * (x[0] ** 2 - x[1]) + 2 * b * (x[0] - 1),
                     -2 * a * (x[0] ** 2 - x[1])])
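
# Sanity check (a sketch, not in the original script): compare the analytic
# gradient with central finite differences at an arbitrary test point.
# The step h and the tolerance are assumed values, not tuned.
def grad_check(x, h=1e-6):
    num = np.zeros(n)
    for i in range(n):
        e = np.zeros(n)
        e[i] = h
        num[i] = (f(x + e) - f(x - e)) / (2 * h)
    return np.allclose(num, grad(x), atol=1e-3)

print(grad_check(np.array([0.5, -0.3])))  # expect True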
# Symbolic Hessian of the same function, as a reference.
xx = Symbol('x')
yy = Symbol('y')
my_function = 158 * ((xx ** 2 - yy) ** 2) + 2 * ((xx - 1) ** 2) + 40
syms = [xx, yy]
print(hessian(my_function, syms))
# Hand-coded Hessian of f, matching the symbolic result above.
# np.array instead of the deprecated np.matrix avoids shape surprises later.
def Hessian(x):
    return np.array([[1896 * x[0] ** 2 - 632 * x[1] + 4, -632 * x[0]],
                     [-632 * x[0], 316]])
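
# Cross-check (assumed, not in the original): evaluate the symbolic Hessian
# at a sample point and compare it with the hand-coded Hessian above.
H_sym = hessian(my_function, syms)
H_at_point = np.array(H_sym.subs({xx: 0.5, yy: -0.3})).astype(float)
print(np.allclose(H_at_point, Hessian(np.array([0.5, -0.3]))))  # expect True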
# Levenberg-Marquardt: damped Newton step d = -(H + m*I)^(-1) * grad(x).
# The damping m is halved after a successful step and doubled otherwise.
def LevenbergMarquardt(x, step=0, m=10000, eps=0.01, M=1000):
    d = grad(x)
    if np.linalg.norm(d) < eps or step > M:
        return f(x), x, step
    while True:
        # Damp with m * identity (np.eye), not a matrix of ones, and solve
        # the linear system instead of forming the inverse explicitly.
        d = np.linalg.solve(Hessian(x) + m * np.eye(n), -grad(x))
        new_x = x + d
        if f(new_x) < f(x):
            return LevenbergMarquardt(new_x, step + 1, m / 2.)
        m *= 2
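
# Cross-check (a sketch, not part of the original script): scipy's trust-region
# Newton solver uses the same grad and Hessian, so its minimiser should agree
# with LevenbergMarquardt; the start point is an arbitrary choice.
from scipy.optimize import minimize
res = minimize(f, np.array([0.5, -0.3]), jac=grad, hess=Hessian, method='trust-ncg')
print(res.x, res.fun)  # expect roughly (1, 1) and f = 40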
# Davidon-Fletcher-Powell quasi-Newton method. G approximates the inverse
# Hessian and is refined with the rank-two DFP update
#   G += (dx dx^T) / (dx^T dg) - (G dg dg^T G) / (dg^T G dg).
def DFP(x, step=0, x_prev=None, G=None, eps1=0.001, eps2=0.001, delta=0.001, m=1000):
    if G is None:
        G = np.eye(n)  # start from the identity; np.ones([n, n]) is singular
    d = -grad(x)
    if np.linalg.norm(d) < eps1 or step > m:
        return f(x), x, step
    if step > 0:
        delta_g = grad(x) - grad(x_prev)
        delta_x = x - x_prev
        # np.outer builds the rank-one matrices; np.dot on 1-D arrays would
        # collapse them to scalars and break the update.
        G = G + np.outer(delta_x, delta_x) / np.dot(delta_x, delta_g) \
              - np.outer(G @ delta_g, G @ delta_g) / (delta_g @ G @ delta_g)
        d = -G @ grad(x)
    # Golden-section line search along the direction d.
    alpha = minimize_scalar(lambda a: f(x + a * d), method='golden')
    new_x = x + alpha.x * d
    if np.linalg.norm(x - new_x) < delta and abs(f(x) - f(new_x)) < eps2:
        return f(new_x), new_x, step
    return DFP(new_x, step + 1, x, G)
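
# Cross-check (assumed): BFGS is the standard quasi-Newton reference point for
# DFP; both maintain an inverse-Hessian approximation, so the minimisers
# should agree up to tolerance.
from scipy.optimize import minimize
res_bfgs = minimize(f, np.array([0.5, -0.3]), jac=grad, method='BFGS')
print(res_bfgs.x, res_bfgs.fun)  # expect roughly (1, 1) and f = 40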
start_point = np.random.random(n)
print("Levenberg-Marquardt method: " + str(LevenbergMarquardt(start_point)))
print("Davidon-Fletcher-Powell method: " + str(DFP(start_point)))