# First submission of the algorithm

 ``` 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169``` ```import math import numpy as np import mpmath as mp import matplotlib.pyplot as plt from matplotlib import cm from mpmath import mpf, workdps from mpl_toolkits.mplot3d import Axes3D def ethalon_function(x, y): return np.exp(-x**2)*x**2 + np.exp(-y**2)*y**2 def fdx(x, y): return 2*x*np.exp(-x**2)*(-x**2 + 1) def fdy(x, y): return 2*y*np.exp(-y**2)*(-y**2 + 1) L = 0.6 left = -2.0 right = 2.0 discr = 100 x_grid = np.linspace(left, right, discr, endpoint=False) y_grid = np.linspace(left, right, discr, endpoint=False) x_mesh, y_mesh = np.meshgrid(x_grid, y_grid) values_mesh = ethalon_function(x_mesh, y_mesh) #plt.contourf(x_mesh, y_mesh, values_mesh) #plt.show() # returns smallest power of 2 that is greater or equal than v def power_2_bound(v): v -= 1 v |= v >> 1 v |= v >> 2 v |= v >> 4 v |= v >> 8 v |= v >> 16 return v + 1 # Only for N=2 function! 
def nth(n): return 2**(n+1) - 3 def eps_generator(gamma): coeff = 1.0 * (gamma - 3) / (gamma - 1) def eps(k): return coeff * gamma ** (-k) return eps epsilon = eps_generator(8.0) def betas_generate(n): return [0] + [nth(i) for i in xrange(1, n)] # find_minimum_k def find_minimum_k(delta_f, gamma = mpf(8.0), k_last=None): # delta func delta_f = lambda k: (gamma - 2) / (gamma - 1) * gamma**(-k) if k_last == None: return 1 assert k_last > 0 return max(mp.ceil(-mp.log(delta_f(k_last)/C, 2)), \ k_last + 1) def search_optimal_k(inequality, low, high): while True: if high == low: return low if high == low + 1: if inequality(epsilon(low) * L, low): return low else: return high middle = mp.floor((high + low) / 2) modulo = epsilon(middle) * L if inequality(modulo, middle): high = middle #return search_optimal_k(inequality, modulo, middle) else: low = middle + 1 #return search_optimal_k(inequality, middle + 1, high) def find_optimal_k(inequality, k_last=None): k_next = find_minimum_k(k_last) valid = k_next i = 1 while True: modulo = epsilon(k_next) * L if inequality(modulo, k_next): print 'k_next =', k_next k_returned = search_optimal_k(inequality, valid, k_next) print 'k_returned =', k_returned return epsilon(k_returned) * L, k_returned print 'reached_k:', k_next k_next *= 2**i i += 1 def k_finder(values, betas, L, C=2, N=2, amount_of_k=3, dps=4096): beta_map = {} max_size = amount_of_k + 1 with workdps(dps): # find proper gamma for N power_of_two = power_2_bound(2*N + 3) gamma_pow = int(math.log(power_of_two, 2)) gamma = mpf(power_of_two) # define a_value function A_CONST = mpf(C) * (2*N + 1) / (N + 1) a_value_empty = lambda r: A_CONST * gamma**beta_map[kseq[r-1]] * mpf(2) a_value = lambda n, r: A_CONST * gamma**beta_map[kseq[r-1]] * mpf(2)**(1 - kseq[n-1]) estimated_norm = mpf(np.max(np.abs(values))) bseq = [estimated_norm] # First iteration - very simple inequality = lambda modulo, k: modulo <= estimated_norm / mpf(2*N + 2) b_one, k_one = find_optimal_k(inequality) kseq = 
[k_one] beta_map[k_one] = nth(k_one) bseq.append(b_one) # Now, make a_nr and c_nj sequences c_nj = [[0]*max_size for _ in xrange(max_size)] a_nr = [[0]*max_size for _ in xrange(max_size)] for i in xrange(max_size): a_nr[i][i] = mpf(N) / mpf(N + 1) a_nr[i][0] = mpf('1') c_nj[i][i] = mpf('1') for j in xrange(i + 1, max_size): a_nr[i][j] = None a_nr[1][0] = a_value(1, 0) c_nj[1][0] = a_nr[1][1] * c_nj[0][0] partial_sums = [] # Next iterations for t in xrange(2, max_size): j_max = t - 1 for r in xrange(t): a_nr[t][r] = a_value_empty(r) ssum = sum([c_nj[j_max - 1][k] * bseq[k] for k in xrange(j_max)]) partial_sums.append(a_nr[t][j_max] * ssum) second_part = sum([c_nj[j_max][j] * bseq[j] for j in xrange(t)]) / (2*N + 2) #print 'SP:', second_part inequality = lambda modulo, k: (modulo + sum(partial_sums) / (2**(k))) <= second_part b_next, k_next = find_optimal_k(inequality, kseq[-1]) kseq.append(k_next) bseq.append(b_next) beta_map[k_next] = nth(k_next) print b_next, k_next for r in xrange(t): a_nr[t][r] /= 2**(k_next) for j in xrange(t - 1, -1, -1): c_nj[t][j] = sum([a_nr[t][k] * c_nj[k-1][j] for k in xrange(j+1, t + 1)]) return kseq, bseq, a_nr, c_nj ```