{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
  11. "import pandas as pd\n",
  12. "import numpy as np\n",
  13. "import os\n",
  14. "from sklearn.base import BaseEstimator, TransformerMixin\n",
  15. "from sklearn.pipeline import FeatureUnion\n",
  16. "from sklearn.linear_model.logistic import LogisticRegression\n",
  17. "import matplotlib.pyplot as plt\n",
  18. "import seaborn as sns\n",
  19. "sns.set_style('whitegrid')\n",
  20. "%matplotlib inline\n",
  21. "import sklearn\n",
  22. "from sklearn.linear_model import LinearRegression\n",
  23. "from sklearn import ensemble, preprocessing, cross_validation\n",
  24. "from sklearn.metrics import roc_auc_score as auc\n",
  25. "from time import time\n",
  26. "from sklearn import linear_model\n",
  27. "from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC\n",
  28. "from sklearn import datasets\n",
  29. "from sklearn.linear_model import SGDClassifier\n",
  30. "from sklearn.linear_model import ElasticNet\n",
  31. "from sklearn.preprocessing import PolynomialFeatures\n",
  32. "from sklearn import svm\n",
  33. "from sklearn.cross_validation import cross_val_score\n",
  34. "from sklearn.datasets import make_blobs\n",
  35. "from sklearn.metrics import mean_squared_error\n",
  36. "from sklearn.datasets import make_friedman1\n",
  37. "from sklearn.ensemble import GradientBoostingRegressor\n",
  38. "from sklearn import gaussian_process\n",
  39. "from sklearn.linear_model import SGDClassifier\n",
  40. "from sklearn.ensemble import RandomForestClassifier\n",
  41. "from sklearn.ensemble import ExtraTreesClassifier\n",
  42. "from sklearn.tree import DecisionTreeClassifier\n",
  43. "from sklearn.ensemble import AdaBoostClassifier\n",
  44. "from sklearn import neighbors, datasets\n",
  45. "from sklearn import tree\n",
  46. "from sklearn.datasets import make_hastie_10_2\n",
  47. "from sklearn.ensemble import GradientBoostingClassifier"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# __file__ is undefined inside a notebook; dirname of the literal string\n",
"# is empty, so this resolves to the current working directory\n",
"script_path = os.path.abspath(os.path.dirname(\"__file__\"))"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"class Get_Price_Rate(BaseEstimator, TransformerMixin):\n",
"    '''\n",
"    get the coupon's PRICE_RATE column as a single numeric feature\n",
"    '''\n",
"\n",
"    def get_feature_names(self):\n",
"        return [self.__class__.__name__]\n",
"\n",
"    def fit(self, data_frame, y=None):\n",
"        '''\n",
"        fit (stateless, nothing to learn)\n",
"\n",
"        :param pandas.DataFrame data_frame: all data\n",
"        :rtype: Get_Price_Rate\n",
"        '''\n",
"        return self\n",
"\n",
"    def transform(self, data_frame):\n",
"        '''\n",
"        transform\n",
"\n",
"        :param pandas.DataFrame data_frame: all data\n",
"        :rtype: numpy.ndarray of shape (n_samples, 1)\n",
"        '''\n",
"        # [None].T turns the 1-d column into an (n, 1) 2-d array\n",
"        return data_frame[\"PRICE_RATE\"].as_matrix()[None].T.astype(np.float)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"class Get_Match_Pref(BaseEstimator, TransformerMixin):\n",
"    '''\n",
"    get whether the user's preferred prefecture matches the coupon area\n",
"    '''\n",
"\n",
"    def get_feature_names(self):\n",
"        return [self.__class__.__name__]\n",
"\n",
"    def fit(self, data_frame, y=None):\n",
"        '''\n",
"        fit (stateless, nothing to learn)\n",
"\n",
"        :param pandas.DataFrame data_frame: all data\n",
"        :rtype: Get_Match_Pref\n",
"        '''\n",
"        return self\n",
"\n",
"    def transform(self, data_frame):\n",
"        '''\n",
"        transform\n",
"\n",
"        :param pandas.DataFrame data_frame: all data\n",
"        :rtype: numpy.ndarray of shape (n_samples, 1)\n",
"        '''\n",
"        res_sr = data_frame[\"PREF_NAME\"] == data_frame[\"ken_name\"]\n",
"        return res_sr.as_matrix()[None].T.astype(np.float)"
]
},
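{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check of the two transformers on a toy frame (hypothetical values, not the competition data): each should return an (n, 1) float column."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# hypothetical toy rows, just to show the (n, 1) output of each transformer\n",
"toy = pd.DataFrame({\"PRICE_RATE\": [50, 80],\n",
"                    \"PREF_NAME\": [\"Tokyo\", \"Osaka\"],\n",
"                    \"ken_name\": [\"Tokyo\", \"Kyoto\"]})\n",
"print Get_Price_Rate().fit_transform(toy)  # (2, 1) float column: 50., 80.\n",
"print Get_Match_Pref().fit_transform(toy)  # (2, 1) float column: 1., 0."
]
},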
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def top_merge(df, n=10, column=\"predict\", merge_column=\"COUPON_ID_hash\"):\n",
"    '''\n",
"    join the top n rows by `column` into one space-separated string\n",
"\n",
"    :param pandas.DataFrame df:\n",
"    :param int n:\n",
"    :param str column:\n",
"    :param str merge_column:\n",
"    :rtype: str\n",
"    '''\n",
"    return \" \".join(df.sort_index(by=column)[-n:][merge_column])"
]
},
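{
"cell_type": "markdown",
"metadata": {},
"source": [
"What `top_merge` returns on a toy frame (hypothetical data): the `n` highest-scoring coupon hashes joined into one space-separated string, which is the submission format."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# hypothetical toy frame: top_merge keeps the n best rows by 'predict'\n",
"toy_df = pd.DataFrame({\"predict\": [0.1, 0.9, 0.5],\n",
"                       \"COUPON_ID_hash\": [\"a\", \"b\", \"c\"]})\n",
"print top_merge(toy_df, n=2)  # -> 'c b'"
]
},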
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"feature_list = [\n",
"    ('PRICE_RATE', Get_Price_Rate()),\n",
"    ('MATCH_PREF', Get_Match_Pref()),\n",
"]"
]
},
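{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small check of how `FeatureUnion` wires these up (not part of the original run): it concatenates each transformer's single column, and `get_feature_names()` prefixes each name with the transformer's key."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# sanity check only; the real fu_obj is built later, right before fitting\n",
"demo_fu = FeatureUnion(transformer_list=feature_list)\n",
"print demo_fu.get_feature_names()  # ['PRICE_RATE__Get_Price_Rate', 'MATCH_PREF__Get_Match_Pref']"
]
},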
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# load the Coupon Purchase Prediction csv files\n",
"user_df = pd.read_csv(\"C:\\\\Users\\\\Vikrant\\\\Coupon\\\\user_list.csv\")\n",
"train_coupon_df = pd.read_csv(\"C:\\\\Users\\\\Vikrant\\\\Coupon\\\\coupon_list_train.csv\")\n",
"train_visit_df = pd.read_csv(\"C:\\\\Users\\\\Vikrant\\\\Coupon\\\\coupon_visit_train.csv\")\n",
"test_coupon_df = pd.read_csv(\"C:\\\\Users\\\\Vikrant\\\\Coupon\\\\coupon_list_test.csv\")"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# create train_df: visit log joined with coupon details, then user details\n",
"train_df = pd.merge(train_visit_df, train_coupon_df,\n",
"                    left_on=\"VIEW_COUPON_ID_hash\", right_on=\"COUPON_ID_hash\")\n",
"train_df = pd.merge(train_df, user_df, on=\"USER_ID_hash\")"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# create the training feature matrix\n",
"fu_obj = FeatureUnion(transformer_list=feature_list)\n",
"X_train = fu_obj.fit_transform(train_df)\n",
"y_train = train_df[\"PURCHASE_FLG\"]\n",
"assert X_train.shape[0] == y_train.size"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"GradientBoostingClassifier(init=None, learning_rate=1.0, loss='deviance',\n",
"              max_depth=3, max_features=None, max_leaf_nodes=None,\n",
"              min_samples_leaf=1, min_samples_split=2,\n",
"              min_weight_fraction_leaf=0.0, n_estimators=100,\n",
"              random_state=50, subsample=1.0, verbose=0, warm_start=False)"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
  242. " # fit model\n",
  243. " #clf = LogisticRegression()\n",
  244. " #clf = ensemble.RandomForestClassifier(n_jobs=4, n_estimators = 20, random_state = 11)\n",
  245. " #clf = ensemble.RandomForestClassifier(n_jobs=500, n_estimators = 1000, random_state = 15)\n",
  246. " #SVC\n",
  247. " #clf = svm.SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3, gamma=0.0, kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False)\n",
  248. " #clf = svm.SVC(C=2.0, cache_size=200, class_weight=1, coef0=0.0, degree=3, gamma=0.0, kernel='rbf', max_iter=-1, probability=True, random_state=4, shrinking=True, tol=0.001, verbose=False)\n",
  249. " #Stochastic Gradient Descent\n",
  250. " #clf = SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1, eta0=0.0, fit_intercept=True, l1_ratio=0.15, learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5, random_state=None, shuffle=True, verbose=0, warm_start=False)\n",
  251. " #clf=SGDClassifier(loss='log',alpha=0.000001,n_iter=100)\n",
  252. " #clf = DecisionTreeClassifier(max_depth=5, min_samples_split=1, random_state=20)\n",
  253. " #RandomForestClassifier - 0.75985\n",
  254. " #clf = RandomForestClassifier(n_estimators=100, max_depth=10, min_samples_split=1, random_state=50)\n",
  255. " #clf = RandomForestClassifier(n_estimators=400, max_depth=20, min_samples_split=1, random_state=200)\n",
  256. " #ExtraTreesClassifier - 0.76\n",
  257. " #clf = ExtraTreesClassifier(n_estimators=150, max_depth=20, min_samples_split=2, random_state=100)\n",
  258. " #clf = ExtraTreesClassifier(n_estimators=200, max_depth=30, min_samples_split=4, random_state=200)\n",
  259. " #Nearest Neighbors Classifier\n",
  260. " #clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)\n",
  261. " #clf = neighbors.KNeighborsClassifier()\n",
  262. " #Decision Tree Classifier\n",
  263. " #clf = tree.DecisionTreeClassifier()\n",
  264. " #Adaboost\n",
  265. " #clf = AdaBoostClassifier(n_estimators=100)\n",
  266. " #GradientBoostingClassifier# - 0.76858\n",
  267. " #clf = GradientBoostingClassifier(n_estimators=400, learning_rate=1.0, max_depth=3, random_state=200)\n",
  268. " clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=3, random_state=50)\n",
  269. " clf.fit(X_train, y_train)"
  270. ]
  271. },
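{
"cell_type": "markdown",
"metadata": {},
"source": [
"The scores in the comments above (e.g. 0.76858 for GradientBoostingClassifier) look like AUC values. A minimal sketch of how such a number could be checked locally with the already-imported `cross_validation` module and `roc_auc_score` (imported as `auc`); the 80/20 split and its seed are assumptions, not part of the original run."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# hedged sketch: holdout AUC with an assumed 80/20 split and seed\n",
"X_tr, X_val, y_tr, y_val = cross_validation.train_test_split(\n",
"    X_train, y_train, test_size=0.2, random_state=0)\n",
"val_clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,\n",
"                                     max_depth=3, random_state=50)\n",
"val_clf.fit(X_tr, y_tr)\n",
"print \"holdout AUC: %.5f\" % auc(y_val, val_clf.predict_proba(X_val)[:, 1])"
]
},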
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# create test_df: cross join every test coupon with every user via a constant key\n",
"test_coupon_df[\"cross\"] = 1\n",
"user_df[\"cross\"] = 1\n",
"test_df = pd.merge(test_coupon_df, user_df, on=\"cross\")"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# create the test feature matrix\n",
"X_test = fu_obj.transform(test_df)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# predict test data and write the per-user top-10 submission\n",
"predict_proba = clf.predict_proba(X_test)\n",
"# PURCHASE_FLG is 0/1, so this picks the positive-class column\n",
"pos_idx = np.where(clf.classes_ == True)[0][0]\n",
"test_df[\"predict\"] = predict_proba[:, pos_idx]\n",
"top10_coupon = test_df.groupby(\"USER_ID_hash\").apply(top_merge)\n",
"top10_coupon.name = \"PURCHASED_COUPONS\"\n",
"top10_coupon.to_csv(\"submission.csv\", header=True)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 2",
"language": "python",
"name": "python2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.10"
}
},
"nbformat": 4,
"nbformat_minor": 0
}