Guest User

Untitled

a guest
May 3rd, 2023
100
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 9.37 KB | None | 0 0
  1. import numpy as np
  2. from scipy.optimize import minimize
  3. import matplotlib.pyplot as plt
  4.  
# Constants
# Mathematical and physical constants hypothesized to be "encoded" in the
# pyramid's geometry; the optimizer tries to reproduce their ratios.
pi = np.pi
gamma = 0.5772156649  # Euler–Mascheroni constant
root_2 = np.sqrt(2)
root_3 = np.sqrt(3)
root_5 = np.sqrt(5)
root_6 = np.sqrt(6)
phi = (1 + np.sqrt(5)) / 2  # golden ratio
tribonacci = 1.839286755  # tribonacci constant (real root of x^3 = x^2 + x + 1)
e = np.e
brun = 1.902160583104  # Brun's constant (sum of reciprocals of twin primes)
cubit = 0.52375  # Ancient egyptian cubit in meters (used only in commented-out code below)
speed_of_light = 299792458  # m/s

# Pyramid real dimensions
# Great Pyramid of Giza, used as the reference for comparison (not as the
# optimization starting point).
b_actual = 230.4  # meters
h_actual = 146.6  # meters



  26. # Cost function
  27. def cost_function(b, h):
  28.     s = np.sqrt((b / 2)**2 + h**2)
  29.     a = b / 2
  30.     d = np.sqrt(2 * b**2)
  31.     p = 2 * b
  32.     ssa = np.arctan(h / (b / 2))
  33.     corner_edge_length = np.sqrt(h**2 + (d / 2)**2)
  34.     c1 = pi * d
  35.     c2 = pi * b
  36.  
  37.     errors = [
  38.         (pi - (p / h))**2,
  39.         (gamma - (p / (h + 2 * d)))**2,
  40.         (root_3 - ((h + 2 * d) / p))**2,
  41.         (root_6 - ((h + 2 * d) / d))**2,
  42.         (root_2 - (d / b))**2,
  43.         (root_5   - ((s + b) / s))**2,
  44.         (tribonacci - ((2 * d + a) / (s + b)))**2,
  45.         (brun - (corner_edge_length / a))**2,
  46.         (phi - 1 - (s / (s + a)))**2,
  47.         (phi - (s / a))**2,
  48.         # (e * 100 - (b / (s / a)) / cubit)**2,
  49.         (e - (ssa / (pi / 2 - ssa) * 2))**2,
  50.         (speed_of_light * 1e-6 - (c1 - c2))**2,
  51.     ]
  52.  
  53.     return np.mean(errors)
  54.  
  55. # Gradient of the cost function using numerical approximation
  56. def gradient(b, h, delta=1e-5):
  57.     db = (cost_function(b + delta, h) - cost_function(b - delta, h)) / (2 * delta)
  58.     dh = (cost_function(b, h + delta) - cost_function(b, h - delta)) / (2 * delta)
  59.    
  60.     return np.array([db, dh])
  61.  
  62. # Calculate the accuracies of the embedded constants
  63. def accuracy(real_constant, approx_constant):
  64.     if real_constant == approx_constant:
  65.         return 1
  66.     else:
  67.         max_diff = max(real_constant, approx_constant)
  68.         diff = abs(real_constant - approx_constant)
  69.         accuracy_value = 1 - (diff / max_diff)
  70.         return accuracy_value
  71.  
  72. def constant_accuracies(b, h):
  73.     s = np.sqrt((b / 2)**2 + h**2)
  74.     a = b / 2
  75.     d = np.sqrt(2 * b**2)
  76.     p = 2 * b
  77.     ssa = np.arctan(h / (b / 2))
  78.     corner_edge_length = np.sqrt(h**2 + (d / 2)**2)
  79.     c1 = pi * d
  80.     c2 = pi * b
  81.  
  82.     accuracies = [
  83.         accuracy(pi, (p / h)),
  84.         accuracy(gamma, (p / (h + 2 * d))),
  85.         accuracy(root_3, ((h + 2 * d) / p)),
  86.         accuracy(root_6, ((h + 2 * d) / d)),
  87.         accuracy(root_2, (d / b)),
  88.         accuracy(root_5, ((s + b) / s)),
  89.         accuracy(tribonacci, ((2 * d + a) / (s + b))),
  90.         accuracy(brun, (corner_edge_length / a)),
  91.         accuracy(phi - 1, (s / (s + a))),
  92.         accuracy(phi, (s / a)),
  93.         # accuracy(e * 100, (b / (s / a)) / cubit),
  94.         accuracy(e, (ssa / (pi / 2 - ssa) * 2)),
  95.         accuracy(speed_of_light * 1e-6, c1 - c2),
  96.     ]
  97.  
  98.     return accuracies
  99.  
# Callback function to store the gradient magnitude
# Mutable module-level state shared between callback() and analyze();
# analyze() resets all three before each optimization run.
gradient_magnitudes = []  # ||grad|| recorded at each optimizer iteration
constant_accuracies_history = []  # per-iteration lists of constant accuracies
nb_iters = 0  # number of optimizer iterations observed so far
  104.  
  105. def callback(x):
  106.     global nb_iters
  107.     b, h = x
  108.     grad = gradient(b, h)
  109.     nb_iters += 1
  110.     grad_magnitude = np.linalg.norm(grad)
  111.     gradient_magnitudes.append(grad_magnitude)
  112.     accuracies = constant_accuracies(b, h)
  113.     constant_accuracies_history.append(accuracies)
  114.     # print("Gradient magnitude:", grad_magnitude)
  115.     # print("Accuracies:", accuracies)
  116.     # print()
  117.    
  118.  
# Hyperparameters for optimization below
max_iter = 50000  # iteration cap forwarded to L-BFGS-B
bounds  = [(0.1, None), (0.1, None)]  # keep both b and h strictly positive
  122.  
  123. # L-BFGS-B optimization
  124. def optimize_bfgs(b_init, h_init, max_iterations=max_iter, tolerance=1e-17):
  125.     x_init = np.array([b_init, h_init])
  126.  
  127.     result = minimize(
  128.         fun=lambda x: cost_function(x[0], x[1]),
  129.         x0=x_init,
  130.         method='L-BFGS-B',
  131.         jac=lambda x: gradient(x[0], x[1]),
  132.         bounds=bounds,
  133.         callback=callback,
  134.         options={'maxiter': max_iterations, 'ftol': 1e-17, 'gtol': 1e-17,'eps': 0.1},
  135.         tol=tolerance,
  136.     )
  137.  
  138.     return result.x
  139.  
# Initial dimensions
# Deliberately far from the real pyramid so the optimizer, not the starting
# point, is responsible for any match found.
b_init = 100
h_init = 100
  143.  
  144. def analyze(b=b_init, h=h_init, max_iterations=max_iter, debug=True, method=optimize_bfgs):
  145.     global constant_accuracies_history, gradient_magnitudes, b_actual, h_actual, nb_iters
  146.     constant_accuracies_history = []
  147.     gradient_magnitudes = []
  148.     nb_iters = 0
  149.  
  150.     # Run BFGS optimization
  151.     x_opt = method(b, h, max_iterations=max_iterations)
  152.     b_opt, h_opt = x_opt
  153.  
  154.     # Print the optimized dimensions
  155.     if debug:
  156.         print()
  157.         print()
  158.         print()
  159.         print("Optimized base side length (b):", b_opt)
  160.         print("Optimized height (h):", h_opt)
  161.         print()
  162.  
  163.     # Compare the optimized dimensions to the actual dimensions of the Great Pyramid of Giza
  164.  
  165.     # Print the real dimensions
  166.     if debug:
  167.         print("Real base side length (b):", b_actual)
  168.         print("Real height (h):", h_actual)
  169.         print()
  170.  
  171.     ratio_opt = h_opt / b_opt
  172.     ratio_actual = h_actual / b_actual
  173.     ratio_accuracy = (1 - abs(ratio_opt - ratio_actual) / ratio_actual) * 100
  174.  
  175.     if debug:
  176.         print("Ratio of the optimized dimensions:", ratio_opt)
  177.         print("Ratio of the actual dimensions:", ratio_actual)
  178.         print("Accuracy of the obtained ratio:", ratio_accuracy, "%")
  179.         print()
  180.  
  181.     accuracy_mean_optimized = 0
  182.  
  183.     if nb_iters > 0:
  184.         constant_accuracies_history = np.array(constant_accuracies_history)
  185.         nb_constants = constant_accuracies_history[0].size
  186.         accuracies_excellent = np.ones(nb_constants)
  187.         accuracies_actual = np.array(constant_accuracies(b_actual, h_actual))
  188.         accuracies_optimized = np.array(constant_accuracies(b_opt, h_opt))
  189.         accuracy_mean_real_world = np.mean((accuracies_excellent - accuracies_actual)**1)
  190.         accuracy_mean_optimized = np.mean((accuracies_excellent - accuracies_optimized)**1)
  191.  
  192.         if debug:
  193.             print("Constant accuracies in real word         :", np.array2string(accuracies_actual))
  194.             print("Constant accuracies from optimization    :", np.array2string(accuracies_optimized))
  195.             print("Constant accuracies delta from real world:", np.array2string(accuracies_optimized - accuracies_actual))
  196.             print()
  197.  
  198.             print("Constant accuracies mean error in real world    :", np.array2string(accuracy_mean_real_world))
  199.             print("Constant accuracies mean error from optimization:", np.array2string(accuracy_mean_optimized))
  200.  
  201.             # Plot gradient magnitude evolution
  202.             fig, ax = plt.subplots()
  203.             ax.plot(gradient_magnitudes)
  204.             ax.set(xlabel='Iteration', ylabel='Gradient Magnitude', title='Gradient Magnitude Evolution')
  205.             plt.show()
  206.  
  207.             # Plot constant accuracy evolution
  208.             for i in range(nb_constants):
  209.                 plt.plot(constant_accuracies_history[:, i], label=f"Constant {i + 1}")
  210.             plt.xlabel('Iteration')
  211.             plt.ylabel('Accuracy')
  212.             plt.title('Constant Accuracy Evolution')
  213.             plt.legend()
  214.             plt.show()
  215.  
  216.     return b_opt, h_opt,accuracy_mean_optimized
  217.  
  218. # b = b_actual
  219. # h = h_actual
  220. # s = np.sqrt((b / 2)**2 + h**2)
  221. # a = b / 2
  222. # d = np.sqrt(2 * b**2)
  223. # p = 2 * b
  224. # ssa = np.arctan(h / (b / 2))
  225. # corner_edge_length = np.sqrt(h**2 + (d / 2)**2)
  226.  
  227. # print("b", b)
  228. # print("h", h)
  229. # print("s", s)
  230. # print("a", a)
  231. # print("d", d)
  232. # print("p", p)
  233. # print("ssa", np.degrees(ssa))
  234. # print("corner_edge_length", corner_edge_length)
  235. # print("(b / (s / a))", (b / (s / a)) / cubit)
  236.  
  237.  
# Run the full analysis once with verbose output and plots; this also
# populates nb_iters, which drives the sweep below.
analyze(debug=True)

print()
print()
print("nb iters:", nb_iters)
  243.  
# Re-run the optimization with an increasing iteration cap (1 .. nb_iters-1)
# to reconstruct how the mean accuracy error evolves.  Each pass restarts
# from scratch, so total work is O(nb_iters^2).
accuracies_mean = []
for i in range(1, nb_iters):
    # Resetting here is redundant (analyze() resets these itself) but harmless.
    gradient_magnitudes = []
    constant_accuracies_history = []
    _, _, accuracy_mean = analyze(max_iterations=i, debug=False)
    accuracies_mean.append(accuracy_mean)
  250.  
# Plot mean accuracy evolution
# One point per iteration cap from the sweep above; lower is better.
fig, ax = plt.subplots()
ax.plot(accuracies_mean)
ax.set(xlabel='Iteration', ylabel='Accuracy Mean', title='Accuracy Evolution')
plt.show()

print()
print("All accuracies")
print(np.array2string(np.array(accuracies_mean)))
  260.  
  261.  
  262. # b = b_init
  263. # h = h_init
  264. # accuracies_mean_extreme = []
  265.  
  266. # print()
  267. # print("Attempting extreme accuracy")
  268.  
  269. # for i in range(100):
  270. #     b, h, _ = analyze(b=b, h=h, debug=False)
  271.  
  272. #     constant_accuracies_history = np.array(constant_accuracies_history)
  273. #     if constant_accuracies_history.size > 0:
  274. #         nb_constants = constant_accuracies_history[0].size
  275. #         accuracies_excellent = np.ones(nb_constants)
  276. #         accuracies_actual = np.array(constant_accuracies(b_actual, h_actual))
  277. #         accuracies_optimized = np.array(constant_accuracies(b, h))
  278. #         accuracy_mean_optimized = np.mean((accuracies_excellent - accuracies_optimized)**1)
  279. #         accuracies_mean_extreme.append(accuracy_mean_optimized)
  280. #     else:
  281. #         accuracies_mean_extreme.append(1)
  282.  
  283. #     print("  mean accuracy:", accuracy_mean_optimized)
  284.  
  285. # # Plot mean extreme accuracy evolution
  286. # fig, ax = plt.subplots()
  287. # ax.plot(accuracies_mean_extreme)
  288. # ax.set(xlabel='Iteration', ylabel='Extreme Accuracy Mean', title='Extreme Accuracy Evolution')
  289. # plt.show()
  290.  
  291.  
  292.  
  293.  
  294. # GPT-4 attempt at freestyling the twelfth constant
  295. # b = 230.4
  296. # h = 146.6
  297.  
  298. # s = np.sqrt((b / 2)**2 + h**2)
  299. # a = (b**2 + 4 * h**2)**(1 / 2) / 2
  300.  
  301. # e_minus_1_approx = b / a
  302. # print("e - 1 approximation:", e_minus_1_approx)
  303.  
  304. # prints: e - 1 approximation: 1.235737850185782
Advertisement
Add Comment
Please, Sign In to add comment