Advertisement
Otisons

Thesis code: Jupyter notebook raises an AttributeError in the penultimate part

May 21st, 2021
85
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 2.49 KB | None | 0 0
  1. # Keep only the data for the last year for all companies. We shall be taking only latest data into consideration.
  2. nyse_data.drop_duplicates(subset='ticker_symbol', keep='last', inplace=True)
  3. Svm_model = {}
  4. Svm_model.fit()
  5. # Adding in the predicted values for bankruptcy in the original dataset
  6. nyse_data["stability"] = Svm_model.fit(scaler.transform(nyse_data[["total_assets", "total_liabilities"]]))
  7. print ("Companies predicted to go bankrupt over a 4 year period: ", len(nyse_data.loc[nyse_data["stability"] != 1, "ticker_symbol"]))
  8.  
  9. AttributeError Traceback (most recent call last)
  10. in
  11. 2 nyse_data.drop_duplicates(subset='ticker_symbol', keep='last', inplace=True)
  12. 3 Svm_model = {}
  13. ----> 4 Svm_model.fit()
  14. 5 # Adding in the predicted values for bankruptcy in the original dataset
  15. 6 nyse_data["stability"] = Svm_model.fit(scaler.transform(nyse_data[["total_assets", "total_liabilities"]]))
  16.  
  17. AttributeError: 'dict' object has no attribute 'fit'
  18.  
  19. count = 0
  20. stock_predictions = {}
  21. stats.normaltest = {}
  22. for i in weekly_stock_prices_log:
  23. # Splitting available data into training for accuracy check using remaining data points
  24. split_point = len(weekly_stock_prices_log[i]) - 20
  25. # Number of weeks from last date in dataset to 2018-12-31 = 117
  26. num_of_predictions = len(weekly_stock_prices_log[i]) + 117
  27. training = weekly_stock_prices_log[i][0:split_point]
  28. model = {}
  29.  
  30. # Try modelling first using p=2, q=1, if that fails use p=1, q=0
  31. try:
  32. model = arima_model.ARMA(training["close"], order=(2,1)).fit()
  33. except:
  34. model = arima_model.ARMA(training["close"], order=(1,0)).fit()
  35.  
  36. #Add the predicted values in a dataframe for ease of further operations.
  37. daterange = pd.date_range(training.index[0], periods=num_of_predictions, freq = 'W-MON').tolist()
  38. stock_predictions[i] = pd.DataFrame(columns=["date", "prediction"])
  39. stock_predictions[i]["date"] = daterange
  40. stock_predictions[i]["prediction"] = model.predict(start=0, end=num_of_predictions)
  41. stock_predictions[i].set_index("date", inplace=True)
  42.  
  43. # Draw a QQPlot to check if the residuals are evenly distributed
  44. if count < 5:
  45. resid = model.resid
  46. print("For "+i+": ",stats.normaltest(resid))
  47. qqplot(resid, line='q', fit=True)
  48. plt.show()
  49. count += 1
  50.  
  51. AttributeError Traceback (most recent call last)
  52. in
  53. 1 count = 0
  54. 2 stock_predictions = {}
  55. ----> 3 stats.normaltest = {}
  56. 4 for i in weekly_stock_prices_log:
  57. 5 # Splitting available data into training for accuracy check using remaining data points
  58.  
  59. AttributeError: 'dict' object has no attribute 'normaltest'
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement