Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import numpy as np
- from scipy.optimize import minimize
- import matplotlib.pyplot as plt
# Constants
pi = np.pi
gamma = np.euler_gamma  # Euler-Mascheroni constant (was truncated to 10 digits)
root_2 = np.sqrt(2)
root_3 = np.sqrt(3)
root_5 = np.sqrt(5)
root_6 = np.sqrt(6)
phi = (1 + np.sqrt(5)) / 2  # golden ratio
tribonacci = 1.8392867552141612  # real root of x^3 = x^2 + x + 1 (full double precision)
e = np.e
brun = 1.902160583104  # Brun's constant (best-known numerical estimate)
cubit = 0.52375  # Ancient egyptian cubit in meters
speed_of_light = 299792458  # m/s
# Pyramid real dimensions (Great Pyramid of Giza)
b_actual = 230.4  # base side length, meters
h_actual = 146.6  # height, meters
- # Cost function
def cost_function(b, h):
    """Mean squared deviation between mathematical constants and the ratios
    embedded in a square pyramid of base side b and height h (meters)."""
    half_base = b / 2
    slant = np.sqrt(half_base**2 + h**2)        # apothem / slant height
    diagonal = np.sqrt(2 * b**2)                # base diagonal
    half_perimeter = 2 * b                      # half of the base perimeter
    slope_angle = np.arctan(h / half_base)      # face slope angle (radians)
    corner_edge = np.sqrt(h**2 + (diagonal / 2)**2)
    circumference_d = pi * diagonal
    circumference_b = pi * b
    # (target constant, pyramid-derived ratio) pairs
    pairs = [
        (pi, half_perimeter / h),
        (gamma, half_perimeter / (h + 2 * diagonal)),
        (root_3, (h + 2 * diagonal) / half_perimeter),
        (root_6, (h + 2 * diagonal) / diagonal),
        (root_2, diagonal / b),
        (root_5, (slant + b) / slant),
        (tribonacci, (2 * diagonal + half_base) / (slant + b)),
        (brun, corner_edge / half_base),
        (phi - 1, slant / (slant + half_base)),
        (phi, slant / half_base),
        # cubit-based e*100 term intentionally disabled, as in the original
        (e, slope_angle / (pi / 2 - slope_angle) * 2),
        (speed_of_light * 1e-6, circumference_d - circumference_b),
    ]
    return np.mean([(target - value)**2 for target, value in pairs])
- # Gradient of the cost function using numerical approximation
- def gradient(b, h, delta=1e-5):
- db = (cost_function(b + delta, h) - cost_function(b - delta, h)) / (2 * delta)
- dh = (cost_function(b, h + delta) - cost_function(b, h - delta)) / (2 * delta)
- return np.array([db, dh])
- # Calculate the accuracies of the embedded constants
- def accuracy(real_constant, approx_constant):
- if real_constant == approx_constant:
- return 1
- else:
- max_diff = max(real_constant, approx_constant)
- diff = abs(real_constant - approx_constant)
- accuracy_value = 1 - (diff / max_diff)
- return accuracy_value
def constant_accuracies(b, h):
    """Per-constant accuracy scores (same geometry and ordering as cost_function)."""
    half_base = b / 2
    slant = np.sqrt(half_base**2 + h**2)        # apothem / slant height
    diagonal = np.sqrt(2 * b**2)                # base diagonal
    half_perimeter = 2 * b                      # half of the base perimeter
    slope_angle = np.arctan(h / half_base)      # face slope angle (radians)
    corner_edge = np.sqrt(h**2 + (diagonal / 2)**2)
    circumference_d = pi * diagonal
    circumference_b = pi * b
    pairs = [
        (pi, half_perimeter / h),
        (gamma, half_perimeter / (h + 2 * diagonal)),
        (root_3, (h + 2 * diagonal) / half_perimeter),
        (root_6, (h + 2 * diagonal) / diagonal),
        (root_2, diagonal / b),
        (root_5, (slant + b) / slant),
        (tribonacci, (2 * diagonal + half_base) / (slant + b)),
        (brun, corner_edge / half_base),
        (phi - 1, slant / (slant + half_base)),
        (phi, slant / half_base),
        # cubit-based e*100 term intentionally disabled, as in the original
        (e, slope_angle / (pi / 2 - slope_angle) * 2),
        (speed_of_light * 1e-6, circumference_d - circumference_b),
    ]
    return [accuracy(target, value) for target, value in pairs]
# Optimization telemetry, populated by the callback on every iteration
gradient_magnitudes = []
constant_accuracies_history = []
nb_iters = 0

def callback(x):
    """Per-iteration L-BFGS-B hook: records the gradient norm and the
    constant-accuracy vector at the current iterate x = [b, h]."""
    global nb_iters
    b, h = x
    grad = gradient(b, h)
    nb_iters += 1
    gradient_magnitudes.append(np.linalg.norm(grad))
    constant_accuracies_history.append(constant_accuracies(b, h))
# Hyperparameters for optimization below
max_iter = 50000
bounds = [(0.1, None), (0.1, None)]  # keep b and h strictly positive

# L-BFGS-B optimization
def optimize_bfgs(b_init, h_init, max_iterations=max_iter, tolerance=1e-17):
    """Minimize cost_function with L-BFGS-B starting from (b_init, h_init).

    Returns the optimized [b, h] as a numpy array.

    Fix: `tolerance` is now applied as ftol/gtol inside `options`. Previously
    it only went to `tol`, which scipy ignores whenever `options` supplies
    hard-coded ftol/gtol — so the parameter had no effect. The default of
    1e-17 matches the old hard-coded values, keeping behavior unchanged.
    """
    x_init = np.array([b_init, h_init])
    result = minimize(
        fun=lambda x: cost_function(x[0], x[1]),
        x0=x_init,
        method='L-BFGS-B',
        jac=lambda x: gradient(x[0], x[1]),
        bounds=bounds,
        callback=callback,
        # 'eps' only matters when jac is omitted; kept for parity with the original.
        options={'maxiter': max_iterations, 'ftol': tolerance, 'gtol': tolerance, 'eps': 0.1},
        tol=tolerance,
    )
    return result.x
# Initial dimensions
b_init = 100
h_init = 100

def analyze(b=b_init, h=h_init, max_iterations=max_iter, debug=True, method=optimize_bfgs):
    """Run one optimization from (b, h), compare the result against the real
    Great Pyramid dimensions, and return (b_opt, h_opt, mean constant error).

    Resets the module-level telemetry (constant_accuracies_history,
    gradient_magnitudes, nb_iters) before the run.
    """
    global constant_accuracies_history, gradient_magnitudes, b_actual, h_actual, nb_iters
    constant_accuracies_history = []
    gradient_magnitudes = []
    nb_iters = 0
    # Run BFGS optimization
    x_opt = method(b, h, max_iterations=max_iterations)
    b_opt, h_opt = x_opt
    # Print the optimized dimensions
    if debug:
        print()
        print()
        print()
        print("Optimized base side length (b):", b_opt)
        print("Optimized height (h):", h_opt)
        print()
    # Compare the optimized dimensions to the actual dimensions of the Great Pyramid of Giza
    # Print the real dimensions
    if debug:
        print("Real base side length (b):", b_actual)
        print("Real height (h):", h_actual)
        print()
    ratio_opt = h_opt / b_opt
    ratio_actual = h_actual / b_actual
    # Accuracy of the optimized height/base ratio vs the real ratio, in percent
    ratio_accuracy = (1 - abs(ratio_opt - ratio_actual) / ratio_actual) * 100
    if debug:
        print("Ratio of the optimized dimensions:", ratio_opt)
        print("Ratio of the actual dimensions:", ratio_actual)
        print("Accuracy of the obtained ratio:", ratio_accuracy, "%")
        print()
    accuracy_mean_optimized = 0
    if nb_iters > 0:
        # The callback recorded per-iteration accuracies; freeze as an array for slicing
        constant_accuracies_history = np.array(constant_accuracies_history)
        nb_constants = constant_accuracies_history[0].size
        accuracies_excellent = np.ones(nb_constants)  # perfect score per constant
        accuracies_actual = np.array(constant_accuracies(b_actual, h_actual))
        accuracies_optimized = np.array(constant_accuracies(b_opt, h_opt))
        # Mean *error*: mean(1 - accuracy); lower is better despite the variable names
        accuracy_mean_real_world = np.mean((accuracies_excellent - accuracies_actual)**1)
        accuracy_mean_optimized = np.mean((accuracies_excellent - accuracies_optimized)**1)
        if debug:
            print("Constant accuracies in real word :", np.array2string(accuracies_actual))
            print("Constant accuracies from optimization :", np.array2string(accuracies_optimized))
            print("Constant accuracies delta from real world:", np.array2string(accuracies_optimized - accuracies_actual))
            print()
            print("Constant accuracies mean error in real world :", np.array2string(accuracy_mean_real_world))
            print("Constant accuracies mean error from optimization:", np.array2string(accuracy_mean_optimized))
            # Plot gradient magnitude evolution
            # NOTE(review): indentation was lost in the paste; the plots are
            # placed under `debug` so the silent re-runs in the driver loop
            # below don't open blocking figure windows — confirm against the
            # original source.
            fig, ax = plt.subplots()
            ax.plot(gradient_magnitudes)
            ax.set(xlabel='Iteration', ylabel='Gradient Magnitude', title='Gradient Magnitude Evolution')
            plt.show()
            # Plot constant accuracy evolution
            for i in range(nb_constants):
                plt.plot(constant_accuracies_history[:, i], label=f"Constant {i + 1}")
            plt.xlabel('Iteration')
            plt.ylabel('Accuracy')
            plt.title('Constant Accuracy Evolution')
            plt.legend()
            plt.show()
    return b_opt, h_opt, accuracy_mean_optimized
- # b = b_actual
- # h = h_actual
- # s = np.sqrt((b / 2)**2 + h**2)
- # a = b / 2
- # d = np.sqrt(2 * b**2)
- # p = 2 * b
- # ssa = np.arctan(h / (b / 2))
- # corner_edge_length = np.sqrt(h**2 + (d / 2)**2)
- # print("b", b)
- # print("h", h)
- # print("s", s)
- # print("a", a)
- # print("d", d)
- # print("p", p)
- # print("ssa", np.degrees(ssa))
- # print("corner_edge_length", corner_edge_length)
- # print("(b / (s / a))", (b / (s / a)) / cubit)
# Single verbose run from the default initial dimensions
analyze(debug=True)
print()
print()
print("nb iters:", nb_iters)

# Re-run with an increasing iteration budget (1 .. nb_iters-1) to trace how
# the mean constant error evolves. NOTE: range() captures the nb_iters value
# from the verbose run above once; analyze() resets the global on every call.
accuracies_mean = []
for i in range(1, nb_iters):
    gradient_magnitudes = []  # redundant — analyze() resets these — kept as-is
    constant_accuracies_history = []
    _, _, accuracy_mean = analyze(max_iterations=i, debug=False)
    accuracies_mean.append(accuracy_mean)
# Plot mean accuracy evolution
fig, ax = plt.subplots()
ax.plot(accuracies_mean)
ax.set(xlabel='Iteration', ylabel='Accuracy Mean', title='Accuracy Evolution')
plt.show()
print()
print("All accuracies")
print(np.array2string(np.array(accuracies_mean)))
- # b = b_init
- # h = h_init
- # accuracies_mean_extreme = []
- # print()
- # print("Attempting extreme accuracy")
- # for i in range(100):
- # b, h, _ = analyze(b=b, h=h, debug=False)
- # constant_accuracies_history = np.array(constant_accuracies_history)
- # if constant_accuracies_history.size > 0:
- # nb_constants = constant_accuracies_history[0].size
- # accuracies_excellent = np.ones(nb_constants)
- # accuracies_actual = np.array(constant_accuracies(b_actual, h_actual))
- # accuracies_optimized = np.array(constant_accuracies(b, h))
- # accuracy_mean_optimized = np.mean((accuracies_excellent - accuracies_optimized)**1)
- # accuracies_mean_extreme.append(accuracy_mean_optimized)
- # else:
- # accuracies_mean_extreme.append(1)
- # print(" mean accuracy:", accuracy_mean_optimized)
- # # Plot mean extreme accuracy evolution
- # fig, ax = plt.subplots()
- # ax.plot(accuracies_mean_extreme)
- # ax.set(xlabel='Iteration', ylabel='Extreme Accuracy Mean', title='Extreme Accuracy Evolution')
- # plt.show()
- # GPt4 attempt at freestyling the twelfth constant
- # b = 230.4
- # h = 146.6
- # s = np.sqrt((b / 2)**2 + h**2)
- # a = (b**2 + 4 * h**2)**(1 / 2) / 2
- # e_minus_1_approx = b / a
- # print("e - 1 approximation:", e_minus_1_approx)
- # prints: e - 1 approximation: 1.235737850185782
Advertisement
Add Comment
Please, Sign In to add comment