Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import pandas as pd
- import numpy as np
- import cmath as mt
- import pylab as pl
- from matplotlib import style
- style.use('ggplot')
# Method of calculating PnL of Volatility Breakout:
def pnl(vect, slow, fast, std_interval, m):
    """Backtest a volatility-breakout strategy and return its cumulative PnL.

    Signals are decided from yesterday's fast moving average relative to
    Bollinger-style bands built around the slow moving average:
        long  (+1) when fastMA breaks above slowMA + m * rolling_std,
        short (-1) when fastMA breaks below slowMA - m * rolling_std,
    with positions held / flipped according to the band re-cross rules below.

    Parameters
    ----------
    vect : 1-D sequence of float
        Adjusted close prices, oldest first.
    slow : int
        Window of the slow moving average (band centre).
    fast : int
        Window of the fast moving average (signal line).
    std_interval : int
        Window of the rolling standard deviation.
    m : float
        Band width multiplier.

    Returns
    -------
    pandas.Series
        Cumulative PnL, indexed from the first row where every rolling
        indicator is defined (warm-up rows are dropped, as before).
    """
    size = len(vect)
    price = pd.Series(vect, dtype=float)

    # Build all indicator columns vectorised (pd.rolling_mean / pd.rolling_std
    # and .ix were removed from pandas; Series.rolling is the replacement).
    df = pd.DataFrame({
        'ClosePrice': price,
        'diffPrice': price.diff(1),           # first difference of the price
        'fastMA': price.rolling(window=fast).mean(),
        'slowMA': price.rolling(window=slow).mean(),
        'rollstd': price.rolling(window=std_interval).std(),
    })
    df['L_Bound'] = df['slowMA'] - m * df['rollstd']   # lower band
    df['U_Bound'] = df['slowMA'] + m * df['rollstd']   # upper band

    # Warm-up length: rows where any indicator is still NaN.  Unlike the
    # original (which only compared slowMA vs rollstd), this also covers the
    # case fast > max(slow, std_interval).
    warmup = int(
        df[['diffPrice', 'fastMA', 'slowMA', 'rollstd']].isnull().any(axis=1).sum()
    )

    fast_ma = df['fastMA'].values
    lo = df['L_Bound'].values
    hi = df['U_Bound'].values

    # Signals of the volatility breakout (state machine on yesterday's data):
    signal = np.zeros(size)
    for i in range(warmup + 1, size):
        prev = signal[i - 1]
        f = fast_ma[i - 1]
        if prev == 0:
            # Flat: enter long above the upper band, short below the lower.
            if f > hi[i - 1]:
                signal[i] = 1
            elif f < lo[i - 1]:
                signal[i] = -1
        elif prev == 1:
            # Long: stay long while above the lower band, flip short below it.
            if f > lo[i - 1]:
                signal[i] = 1
            elif f < lo[i - 1]:
                signal[i] = -1
        else:
            # Short: stay short while below the upper band, flip long above it.
            if f < hi[i - 1]:
                signal[i] = -1
            elif f > hi[i - 1]:
                signal[i] = 1

    # Daily PnL = position taken today * today's price change.
    daily_pl = signal * np.nan_to_num(df['diffPrice'].values)
    daily_pl[:warmup + 1] = 0.0

    # Cumulative PnL over the post-warm-up rows (original index labels kept).
    cummulativePL = pd.Series(daily_pl[warmup:], index=range(warmup, size)).cumsum()
    return cummulativePL
# Importing training period
df1 = pd.read_csv("C:/Users/Desktop/Nam/AAPL_train.csv", delimiter=',', thousands=',')
# Extracting Adjusted Close Price and convert it to ndarray format
# (.ix was removed from pandas; plain column selection replaces it)
vect = df1['Adj Close'].values
# Setting up possible values for parameters:
slow = np.arange(60, 180, 30)
fast = np.arange(10, 45, 5)
std_interval = np.arange(60, 180, 30)
m = np.arange(1, 2.5, 0.5)
# Grid search: collect one record per parameter combination, then build the
# frame in one shot (avoids a 20000-row pre-allocation full of NaN filler
# and thousands of slow per-cell writes).
records = []
for i in slow:
    for j in fast:
        for t in std_interval:
            for x in m:
                records.append({
                    'slow': i,
                    'fast': j,
                    'std_interval': t,
                    'm': x,
                    # final cumulative PnL for this parameter set
                    'PL': pnl(vect, i, j, t, x).iloc[-1],
                })
df = pd.DataFrame(records, columns=['slow', 'fast', 'std_interval', 'm', 'PL'])
result = df.sort_values(['PL'], ascending=False)
print(result.head())
# Best results are showed as followed:
# slow  fast  std_interval  m   PL
# 60    25    60            1   120.93
# 120   40    90            1   120.536
# 90    30    90            1   119.817
# 90    25    90            1   117.067
# 90    10    120           1   115.278
# The best set of parameters are (slow = 60, fast = 25, std_interval = 60, m = 1)
# Importing the out-of-sample (test) period:
df1 = pd.read_csv("C:/Users/Desktop/Nam/AAPL_test.csv", delimiter=',', thousands=',')
# (.ix was removed from pandas; plain column selection replaces it)
vect = df1['Adj Close'].values
# Calculating Buy and Hold cummulative sum and PnL over the holding period:
BuyAndHold_PL = np.diff(vect)
BuyAndHold_cumSum = BuyAndHold_PL.cumsum()
BuyAndHold_totalPL = vect[-1] - vect[0]
# Run the breakout backtest once with the best in-sample parameters
# (the original recomputed pnl(...) twice for the same arguments).
Double_Vol_CumSumPL = pnl(vect, 60, 25, 60, 1)
# Defining the starting index of both strategies: skip the 59-row warm-up
# of the breakout strategy so the two curves start at comparable dates.
BuyAndHold_cumSum = BuyAndHold_cumSum[59:]
index1 = np.arange(len(Double_Vol_CumSumPL))
index2 = np.arange(len(BuyAndHold_cumSum))
# Plotting Cummulative PnL of Vol Breakout against Buy and Hold strategy:
pl.plot(index1, Double_Vol_CumSumPL, 'r', label='Volatility Breakout cumulative PnL', linestyle='--')
pl.plot(index2, BuyAndHold_cumSum, 'b', label='Buy n Hold cumulative PnL')
# 'top right' is not a valid matplotlib legend location; 'upper right' is.
pl.legend(loc='upper right')
pl.show()
# Risk free rate from 2009-2015:
# Source: http://www.multpl.com/10-year-treasury-rate/table/by-year
annual_Rf = [0.0252, 0.0373, 0.0339, 0.0197, 0.0191, 0.0286, 0.0188, 0.0209]
# Converting annual risk free rate to daily risk free rate (252 trading days):
daily_annual_Rf = np.mean(annual_Rf) / 252
# Calculating Sharpe ratio:
PnL = np.diff(Double_Vol_CumSumPL)
PnL = np.delete(PnL, [0])
# Align the price vector with the daily PnL series (drop warm-up + last row).
vect = vect[60:len(vect) - 1]
# zip stops at the shorter sequence, so a one-off length mismatch can no
# longer raise IndexError like the original range(len(vect)) loop could.
PortfolioReturn = [p / v for p, v in zip(PnL, vect)]
mean_excess_return = np.nanmean(PortfolioReturn)
excess_return_std = np.nanstd(np.subtract(PortfolioReturn, daily_annual_Rf))
# np.sqrt, not cmath.sqrt: cmath returns a complex number, which made the
# original print the Sharpe ratio as e.g. (0.67+0j).
SharpeRatio = np.sqrt(252) * (mean_excess_return - daily_annual_Rf) / excess_return_std
print(SharpeRatio)
# Sharpe Ratio is 0.67
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement