def k_finder(points, values, betas, C=2, N=2, max_size=10, dps=2048):
    """Iteratively choose a sequence of k-exponents for an approximation scheme.

    Works at `dps` decimal digits of mpmath precision (via `workdps`).
    Builds the coefficient tables `a_nr` / `c_nj`, the bound sequence `bs`,
    and running `partial_sums`, seeding the first iteration with a simple
    threshold test on the estimated sup-norm of `values`.

    Parameters (types inferred from usage -- confirm against callers):
        points, values : array-likes handed to `optimal_k_finder`; `values`
            must be accepted by `np.abs`/`np.max`.
        betas   : indexable by the entries of `ks` (see NOTE below).
        C       : constant factor in the a_value bound (default 2).
        N       : order parameter of the scheme (default 2).
        max_size: number of iterations / dimension of the coefficient tables.
        dps     : mpmath working precision in decimal digits.

    NOTE(review): the visible source ends mid-way through the main iteration
    loop -- the body after `min_k = find_min_k(ks[-1])` is missing from this
    chunk, and no value is returned here.
    """
    with workdps(dps):
        # find proper gamma
        # NOTE(review): `pow_2` is computed but never used, while `gamma` is
        # read on the next two lines before ever being assigned (NameError as
        # written). Presumably `gamma` was meant to be derived from `pow_2`
        # (e.g. gamma = pow_2) -- confirm against the original version.
        pow_2 = power_2_bound(2*N + 3)
        gamma_pow = int(math.log(gamma, 2))  # exponent such that gamma == 2**gamma_pow
        gamma = mpf(gamma)  # promote to arbitrary precision for the work below

        # define a_value function
        # Constant factor C * (2N+1)/(N+1), hoisted out of a_value.
        a_value_const = mpf(C) * (2*N + 1) / (N + 1)

        def a_value(n, r):
            # Off-diagonal coefficient a_{n,r}.
            # NOTE(review): `k[n-1]` -- no local `k` exists at call time;
            # this most likely should read `ks[n-1]`. Verify before relying
            # on any call with this code path.
            return a_value_const * gamma**betas[ks[r-1]] * mpf(2)**(1 - k[n-1])

        # delta func
        def delta_func(k):
            # Geometric tail bound (gamma-2)/(gamma-1) * gamma^(-k).
            return (gamma - 2) / (gamma - 1) * gamma**(-k)

        def find_min_k(k_prev):
            # Smallest admissible next k: large enough that delta/C is
            # suppressed, and strictly greater than the previous k.
            k1 = mp.ceil(-mp.log(delta_func(k_prev) / C))
            return max(k1, k_prev + 1)

        # Sup-norm estimate of the input values, used as the initial bound b_0.
        estimated_norm = mpf(np.max(np.abs(values)))
        bs = [estimated_norm]
        print estimated_norm  # Python 2 print statement -- debug output

        # First iteration - using simpler strategy:
        # accept any modulo not exceeding ||values|| / (2N+2).
        # (The `k` parameter of the lambda is intentionally unused here.)
        inequality = lambda modulo, k: modulo <= estimated_norm / mpf(2*N + 2)
        ks = [optimal_k_finder(points, values, inequality, 1)]

        # Coefficient tables, max_size x max_size, zero-initialized.
        c_nj = [[0]*max_size for _ in xrange(max_size)]
        a_nr = [[0]*max_size for _ in xrange(max_size)]
        # Diagonals: a_{i,i} = N/(N+1), c_{i,i} = 1.
        for i in xrange(max_size):
            a_nr[i][i] = mpf(N) / mpf(N+1)
            c_nj[i][i] = mpf('1')

        # Seed the first off-diagonal entries.
        a_nr[1][0] = a_value(1, 0)
        # NOTE(review): uses the diagonal a_nr[1][1] rather than the freshly
        # computed a_nr[1][0] -- possibly intentional, but looks like a typo
        # for a_nr[1][0]; confirm against the recurrence this implements.
        c_nj[1][0] = a_nr[1][1] * c_nj[0][0]
        partial_sums = [c_nj[0][0]*bs[0]]

        # next iterations - more complex, based on previous iterations
        # NOTE(review): loop body is truncated in this chunk after min_k.
        for i in xrange(1, max_size):
            min_k = find_min_k(ks[-1])