import math
import numpy as np
from numpy.linalg import inv
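# Constrained minimization of a 3-D Rosenbrock-type function
#     f(x) = 70*(x0^2 - x1)^2 + 5*(x0 - 1)^2 + 70*(x1^2 - x2)^2 + 5*(x1 - 1)^2 + 30
# subject to g_i(x) <= 0:
#     g1: x0^2 + x1^2 - 3*x2 - 5 <= 0,    g2-g4: x0, x1, x2 >= 0.
# Three approaches are compared: an exterior penalty, an interior (barrier)
# penalty, and an augmented-Lagrangian (multiplier) method.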
def rosenbrock(x):
return 70 * (x[0]**2 - x[1])**2 + 5 * (x[0] - 1)**2 + 70 * (x[1]**2 - x[2])**2 + 5 * (x[1] - 1)**2 + 30
def g1(x):
return x[0] ** 2 + x[1] ** 2 - 3 * x[2] - 5
def g2(x):
return - x[0]
def g3(x):
return - x[1]
def g4(x):
return - x[2]
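# Analytic partial derivatives of the objective f.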
def derX1(x):
    # d f / d x0; the second term comes from 5*(x0 - 1)^2.
    return 280 * (x[0]**2 - x[1]) * x[0] + 10 * (x[0] - 1)
def derX2(x):
return -140 * (x[0]**2 - x[1]) + 280 * (x[1]**2 - x[2]) * x[1] + 10 * (x[1] - 1)
def derX3(x):
return -140 * (x[1]**2 - x[2])
def grad(x):
return np.array([derX1(x), derX2(x), derX3(x)])
def norma(x):
return math.sqrt(x[0]**2 + x[1]**2 + x[2]**2)
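# Gradient of constraint g1; the gradients of g2-g4 are the constant vectors
# (-1, 0, 0), (0, -1, 0), (0, 0, -1) and are written inline where needed.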
def derX1g1(x):
return 2. * x[0]
def derX2g1(x):
return 2. * x[1]
def derX3g1(x):
return -3.
def derg1(x):
return np.array([derX1g1(x), derX2g1(x), derX3g1(x)])
def gradF(x, r):
    # Gradient of the exterior-penalty function F(x) = f(x) + r/2 * sum(max(0, g_i(x))^2):
    # each active constraint contributes r * max(0, g_i(x)) * grad g_i(x).
    gf = grad(x)
    gg1 = max(0., g1(x)) * derg1(x)
    gg2 = max(0., g2(x)) * np.array([-1., 0., 0.])
    gg3 = max(0., g3(x)) * np.array([0., -1., 0.])
    gg4 = max(0., g4(x)) * np.array([0., 0., -1.])
    return gf + r * (gg1 + gg2 + gg3 + gg4)
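# Gradient of the interior-penalty (barrier) function F(x) = f(x) - r * sum_i 1/g_i(x).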
def gradFIn(x, r):
gf = grad(x)
gg1 = -derg1(x) / g1(x)**2
gg2 = -np.array([-1, 0, 0]) / g2(x) ** 2
gg3 = -np.array([0, -1, 0]) / g3(x) ** 2
gg4 = -np.array([0, 0, -1]) / g4(x) ** 2
return gf - r * (gg1 + gg2 + gg3 + gg4)
# print(gradFIn(np.array([1., 1., 1.]), 1))
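# Build the Fibonacci sequence 1, 1, 2, 3, ... until it reaches l0, plus one
# extra term; its length sets the number of Fibonacci-search iterations.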
def fffq(l0):
a = [1., 1.]
while a[-1] < l0:
a.append(a[-1] + a[-2])
a.append(a[-1] + a[-2])
return a
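# Fibonacci search for the minimum of a unimodal function q on [a, b] with tolerance epsilon.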
def fibchiq(a, b, epsilon, q):
fib = fffq(math.fabs(b - a)/epsilon)
n = len(fib) - 1
midL = a + fib[n - 2] * (b - a) / fib[n]
    midR = a + fib[n - 1] * (b - a) / fib[n]
while n != 2:
if q(midL) <= q(midR):
b = midR
midR = midL
midL = a + fib[n - 2] * (b - a) / fib[n]
else:
a = midL
midL = midR
midR = a + fib[n - 1] * (b - a) / fib[n]
n -= 1
return (a + b)/2
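# Fletcher-Reeves conjugate gradient for the exterior-penalty function F.
# The step along each direction is chosen by the Fibonacci search; iteration
# stops when the gradient norm falls below e1, after M iterations, or when
# both the step length and the change in F fall below sigma and e2.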
def FletcherReeves(x0, e1, e2, sigma, M, func, F, r):
dk_1 = -gradF(x0, r)
dk = 0
xk_1 = x0
xk = 0
xk1 = 0
wk_1 = 0
k = 0
def q(a):
return F(xk_1 + a * dk_1)
alphaK = func(-1000, 1000, e1 / 10000, q)
xk = xk_1 + alphaK * dk_1
    while True:
        # print(xk_1, xk, xk1)
if norma(gradF(xk, r)) <= e1:
return xk, F(xk)
if k >= M:
return xk, F(xk)
        # Fletcher-Reeves coefficient; restart (w = 0) if the previous gradient vanishes.
        if norma(gradF(xk_1, r)) == 0:
            wk_1 = 0
        else:
            wk_1 = norma(gradF(xk, r)) ** 2 / norma(gradF(xk_1, r)) ** 2
dk = -gradF(xk, r) + wk_1 * dk_1
def q(a):
return F(xk + a * dk)
alphaK = func(-1000, 1000, e1 / 10000, q)
xk1 = xk + alphaK * dk
if norma(xk1 - xk) < sigma and math.fabs(F(xk1) - F(xk)) < e2:
return xk, F(xk)
k += 1
xk_1 = xk
xk = xk1
dk_1 = dk
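# Exterior penalty method: minimize f(x) + r/2 * sum_i max(0, g_i(x))^2 with
# Fletcher-Reeves, multiplying the penalty parameter r by C until the penalty
# term at the minimizer drops below the tolerance e.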
def penaltyOut(x0, e, r0 = 1.0, C = 4):
r = r0
x = x0
k = 0
def P(x):
return r / 2 * np.array([max(0, g1(x))**2, \
max(0, g2(x))**2, \
max(0, g3(x))**2, \
max(0, g4(x))**2]).sum()
def F(x):
return rosenbrock(x) + P(x)
    while True:
        minFx, _ = FletcherReeves(x, e / 100, 0.001, 0.001, 25, fibchiq, F, r)
p = P(minFx)
if p <= e:
return minFx, rosenbrock(minFx)
r *= C
x = minFx
k += 1
xxxOut = np.array([3., 0., 0.])
print("PenaltyOut:")
print(penaltyOut(xxxOut, 0.1))
%timeit penaltyOut(xxxOut, 0.1)
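# Gradient descent with a normalized step for the interior-penalty function:
# the step lambdaa is multiplied by delta whenever the sufficient-decrease
# test fails; iteration stops once the change in F is below epsilon.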
def gdm(pointsX, lambdaa0, epsilon, delta, r, F):
xk = pointsX
lambdaa = lambdaa0
while True:
vectorGrad = gradFIn(xk, r)
Norma = norma(vectorGrad)
if Norma != 0:
lambdaaGrad = [lambdaa * (x / Norma) for x in vectorGrad]
else:
lambdaaGrad = [lambdaa * x for x in vectorGrad]
xk1 = []
for i in range(len(xk)):
xk1.append(xk[i] - lambdaaGrad[i])
if math.fabs(F(xk1) - F(xk)) <= epsilon:
return xk1
if F(xk1) >= F(xk) - epsilon * lambdaa * Norma ** 2:
lambdaa *= delta
xk = xk1
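# Interior-penalty (barrier) method: minimize f(x) - r * sum_i 1/g_i(x) with
# gradient descent, dividing the barrier parameter r by C until the barrier
# term is below the tolerance e. The start point must be strictly feasible.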
def PenaltyIn(x0, e, r0 = 1., C = 10.):
x = x0
r = r0
k = 0
def P(x):
return - r * np.array([1. / g1(x), 1. / g2(x), 1. / g3(x), 1. / g4(x)]).sum()
def F(x):
return rosenbrock(x) + P(x)
    while True:
        minFx = gdm(x, 1, e / 100, 0.95, r, F)
minFy = F(minFx)
p = P(minFx)
        # print(minFx, minFy, r, p)
if math.fabs(p) <= e:
return minFx, rosenbrock(minFx)
r /= C
x = minFx
k += 1
xxxIn = np.array([1., 1., 1.])
print("PenaltyIn:")
print(PenaltyIn(xxxIn, 0.1))
%timeit PenaltyIn(xxxIn, 0.1)
def gradFL(x, r, m):
    # Gradient of the augmented Lagrangian
    # L(x) = f(x) + 1/(2r) * sum_i (max(0, m_i + r*g_i(x))^2 - m_i^2):
    # each constraint gradient is weighted by max(0, m_i + r*g_i(x)).
    gf = grad(x)
    gg1 = max(0., m[0] + r * g1(x)) * derg1(x)
    gg2 = max(0., m[1] + r * g2(x)) * np.array([-1., 0., 0.])
    gg3 = max(0., m[2] + r * g3(x)) * np.array([0., -1., 0.])
    gg4 = max(0., m[3] + r * g4(x)) * np.array([0., 0., -1.])
    return gf + gg1 + gg2 + gg3 + gg4
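# Same gradient-descent scheme as gdm above, but driven by the
# augmented-Lagrangian gradient gradFL and the multiplier vector m.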
def gdm1(pointsX, lambdaa0, epsilon, delta, r, F, m):
xk = pointsX
lambdaa = lambdaa0
while True:
vectorGrad = gradFL(xk, r, m)
Norma = norma(vectorGrad)
if Norma != 0:
lambdaaGrad = [lambdaa * (x / Norma) for x in vectorGrad]
else:
lambdaaGrad = [lambdaa * x for x in vectorGrad]
xk1 = []
for i in range(len(xk)):
xk1.append(xk[i] - lambdaaGrad[i])
if math.fabs(F(xk1) - F(xk)) <= epsilon:
return xk1
if F(xk1) >= F(xk) - epsilon * lambdaa * Norma ** 2:
lambdaa *= delta
xk = xk1
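# Augmented-Lagrangian (multiplier) method: minimize
# f(x) + 1/(2r) * sum_i (max(0, m_i + r*g_i(x))^2 - m_i^2) with gradient
# descent, then increase r and update the multipliers
# m_i <- max(0, m_i + r*g_i(x)).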
def L(x0, e, m0, r0 = 0.1, C = 4):
x = x0
r = r0
m = m0
k = 0
def P(x):
return 1 / (2 * r) * np.array([max(0, m[0] + r * g1(x))**2 - m[0]**2, \
max(0, m[1] + r * g2(x))**2 - m[1]**2, \
max(0, m[2] + r * g3(x))**2 - m[2]**2, \
max(0, m[3] + r * g4(x))**2 - m[3]**2]).sum()
    # Augmented-Lagrangian objective for the inner unconstrained minimization.
    def Laug(x):
        return rosenbrock(x) + P(x)
    while True:
        minFx = gdm1(x, 1, e / 100, 0.95, r, Laug, m)
        minFy = Laug(minFx)
p = P(minFx)
if p <= e:
return minFx, rosenbrock(minFx)
r *= C
m = np.array([max(0, m[0] + r * g1(minFx)), max(0, m[1] + r * g2(minFx)), \
max(0, m[2] + r * g3(minFx)), max(0, m[3] + r * g4(minFx))])
x = minFx
k += 1
xxxL = np.array([3., 0., 0.])
print("Lagrangian:")
print(L(xxxL, 0.1, np.array([0.0, 0.0, 0.0, 0.0])))
%timeit L(xxxL, 0.1, np.array([0.0, 0.0, 0.0, 0.0]))