  1. {
  2. "cells": [
  3. {
  4. "cell_type": "code",
  5. "execution_count": 8,
  6. "metadata": {
  7. "collapsed": false
  8. },
  9. "outputs": [],
  10. "source": [
  11. "from __future__ import print_function\n",
  12. "import tensorflow as tf\n",
  13. "from tensorflow.contrib.learn.python.learn.utils import export\n",
  14. "from tensorflow.contrib.learn.python.learn.utils import input_fn_utils\n",
  15. "from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils\n",
  16. "from tensorflow.contrib.session_bundle import manifest_pb2\n",
  17. "from tensorflow.contrib.session_bundle import exporter\n",
  18. "import pandas as pd\n",
  19. "\n",
  20. "tf.logging.set_verbosity(tf.logging.DEBUG)"
  21. ]
  22. },
  23. {
  24. "cell_type": "code",
  25. "execution_count": 13,
  26. "metadata": {
  27. "collapsed": false
  28. },
  29. "outputs": [
  30. {
  31. "data": {
  32. "text/html": [
  33. "<div>\n",
  34. "<table border=\"1\" class=\"dataframe\">\n",
  35. " <thead>\n",
  36. " <tr style=\"text-align: right;\">\n",
  37. " <th></th>\n",
  38. " <th>id</th>\n",
  39. " <th>first_name</th>\n",
  40. " <th>last_name</th>\n",
  41. " <th>email</th>\n",
  42. " <th>age</th>\n",
  43. " <th>gender</th>\n",
  44. " <th>state</th>\n",
  45. " <th>purchase_recency</th>\n",
  46. " <th>purchase_frequency</th>\n",
  47. " <th>monetary_value</th>\n",
  48. " <th>interaction_recency</th>\n",
  49. " <th>interaction_frequency</th>\n",
  50. " <th>target</th>\n",
  51. " </tr>\n",
  52. " </thead>\n",
  53. " <tbody>\n",
  54. " <tr>\n",
  55. " <th>0</th>\n",
  56. " <td>88838</td>\n",
  57. " <td>Sarina</td>\n",
  58. " <td>Friesen</td>\n",
  59. " <td>sarina_friesen@example.org</td>\n",
  60. " <td>20</td>\n",
  61. " <td>n/a</td>\n",
  62. " <td>DE-BE</td>\n",
  63. " <td>1</td>\n",
  64. " <td>5</td>\n",
  65. " <td>9</td>\n",
  66. " <td>10</td>\n",
  67. " <td>5</td>\n",
  68. " <td>False</td>\n",
  69. " </tr>\n",
  70. " <tr>\n",
  71. " <th>1</th>\n",
  72. " <td>31524</td>\n",
  73. " <td>Nella</td>\n",
  74. " <td>Heller</td>\n",
  75. " <td>heller_nella@example.org</td>\n",
  76. " <td>10</td>\n",
  77. " <td>male</td>\n",
  78. " <td>n/a</td>\n",
  79. " <td>8</td>\n",
  80. " <td>8</td>\n",
  81. " <td>10</td>\n",
  82. " <td>2</td>\n",
  83. " <td>3</td>\n",
  84. " <td>True</td>\n",
  85. " </tr>\n",
  86. " <tr>\n",
  87. " <th>2</th>\n",
  88. " <td>3118</td>\n",
  89. " <td>Alisa</td>\n",
  90. " <td>Deckow</td>\n",
  91. " <td>alisa.deckow@example.org</td>\n",
  92. " <td>40</td>\n",
  93. " <td>female</td>\n",
  94. " <td>DE-NW</td>\n",
  95. " <td>0</td>\n",
  96. " <td>9</td>\n",
  97. " <td>7</td>\n",
  98. " <td>6</td>\n",
  99. " <td>9</td>\n",
  100. " <td>False</td>\n",
  101. " </tr>\n",
  102. " <tr>\n",
  103. " <th>3</th>\n",
  104. " <td>93388</td>\n",
  105. " <td>Edmond</td>\n",
  106. " <td>Zemlak</td>\n",
  107. " <td>zemlak_edmond@example.net</td>\n",
  108. " <td>25</td>\n",
  109. " <td>n/a</td>\n",
  110. " <td>DE-MV</td>\n",
  111. " <td>4</td>\n",
  112. " <td>10</td>\n",
  113. " <td>7</td>\n",
  114. " <td>4</td>\n",
  115. " <td>9</td>\n",
  116. " <td>True</td>\n",
  117. " </tr>\n",
  118. " <tr>\n",
  119. " <th>4</th>\n",
  120. " <td>32469</td>\n",
  121. " <td>Garry</td>\n",
  122. " <td>Moen</td>\n",
  123. " <td>moen.garry@example.net</td>\n",
  124. " <td>40</td>\n",
  125. " <td>female</td>\n",
  126. " <td>DE-BE</td>\n",
  127. " <td>3</td>\n",
  128. " <td>3</td>\n",
  129. " <td>3</td>\n",
  130. " <td>3</td>\n",
  131. " <td>8</td>\n",
  132. " <td>False</td>\n",
  133. " </tr>\n",
  134. " </tbody>\n",
  135. "</table>\n",
  136. "</div>"
  137. ],
  138. "text/plain": [
  139. " id first_name last_name email age gender state \\\n",
  140. "0 88838 Sarina Friesen sarina_friesen@example.org 20 n/a DE-BE \n",
  141. "1 31524 Nella Heller heller_nella@example.org 10 male n/a \n",
  142. "2 3118 Alisa Deckow alisa.deckow@example.org 40 female DE-NW \n",
  143. "3 93388 Edmond Zemlak zemlak_edmond@example.net 25 n/a DE-MV \n",
  144. "4 32469 Garry Moen moen.garry@example.net 40 female DE-BE \n",
  145. "\n",
  146. " purchase_recency purchase_frequency monetary_value interaction_recency \\\n",
  147. "0 1 5 9 10 \n",
  148. "1 8 8 10 2 \n",
  149. "2 0 9 7 6 \n",
  150. "3 4 10 7 4 \n",
  151. "4 3 3 3 3 \n",
  152. "\n",
  153. " interaction_frequency target \n",
  154. "0 5 False \n",
  155. "1 3 True \n",
  156. "2 9 False \n",
  157. "3 9 True \n",
  158. "4 8 False "
  159. ]
  160. },
  161. "execution_count": 13,
  162. "metadata": {},
  163. "output_type": "execute_result"
  164. }
  165. ],
  166. "source": [
  167. "model_dir = \"/models/targeting\"\n",
  168. "\n",
  169. "train_file = \"data/customers-rfmi.train.csv\"\n",
  170. "test_file = \"data/customers-rfmi.test.csv\"\n",
  171. "\n",
  172. "LABEL_COLUMN = \"target\"\n",
  173. "COLUMNS = [\"id\", \"gender\", \"age\", LABEL_COLUMN]\n",
  174. "CATEGORICAL_COLUMNS = [\"gender\", \"state\"]\n",
  175. "CONTINUOUS_COLUMNS = [\"age\",\n",
  176. " \"purchase_recency\", \"purchase_frequency\", \"monetary_value\",\n",
  177. " \"interaction_recency\", \"interaction_frequency\"]\n",
  178. "\n",
  179. "def load_df(filename):\n",
  180. " df = pd.read_csv(filename, skipinitialspace=True)\n",
  181. " df['gender'] = df['gender'].fillna('n/a')\n",
  182. " df['state'] = df['state'].fillna('n/a')\n",
  183. " return df\n",
  184. " \n",
  185. "df_train, df_test = load_df(train_file), load_df(test_file)\n",
  186. "df_train.head()"
  187. ]
  188. },
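{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Added sketch (not part of the original run): a quick look at dataset sizes and\n",
"# label balance before training. The eval output further down reports\n",
"# accuracy/baseline_label_mean = 0.4257, so the classes are only mildly skewed.\n",
"print(len(df_train), len(df_test))\n",
"print(df_train[LABEL_COLUMN].mean(), df_test[LABEL_COLUMN].mean())"
]
},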
  189. {
  190. "cell_type": "code",
  191. "execution_count": 14,
  192. "metadata": {
  193. "collapsed": false
  194. },
  195. "outputs": [],
  196. "source": [
  197. "def input_fn(df):\n",
  198. " # Creates a dictionary mapping from each continuous feature column name (k) to\n",
  199. " # the values of that column stored in a constant Tensor.\n",
  200. " continuous_cols = {k: tf.constant(df[k].values)\n",
  201. " for k in CONTINUOUS_COLUMNS}\n",
  202. "\n",
  203. " # Creates a dictionary mapping from each categorical feature column name (k)\n",
  204. " # to the values of that column stored in a tf.SparseTensor.\n",
  205. " categorical_cols = {k: tf.SparseTensor(\n",
  206. " indices=[[i, 0] for i in range(df[k].size)],\n",
  207. " values=df[k].values,\n",
  208. " shape=[df[k].size, 1])\n",
  209. " for k in CATEGORICAL_COLUMNS}\n",
  210. " \n",
  211. " # Merges the two dictionaries into one.\n",
  212. " feature_cols = dict(continuous_cols.items() + categorical_cols.items())\n",
  213. " # Converts the label column into a constant Tensor.\n",
  214. " label = tf.constant(df[LABEL_COLUMN].values)\n",
  215. " # Returns the feature columns and the label.\n",
  216. " return feature_cols, label"
  217. ]
  218. },
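{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Added sketch: materialize the tensors that input_fn builds for the test frame\n",
"# and inspect them. input_fn embeds the whole DataFrame as graph constants, so\n",
"# this assumes the data fits comfortably in memory.\n",
"features, label = input_fn(df_test)\n",
"with tf.Session() as sess:\n",
"    age_values, gender_values, label_values = sess.run(\n",
"        [features['age'], features['gender'], label])\n",
"print(age_values.shape, label_values.shape)\n",
"print(gender_values.values[:5])  # SparseTensorValue holding the raw strings"
]
},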
  219. {
  220. "cell_type": "code",
  221. "execution_count": 15,
  222. "metadata": {
  223. "collapsed": false
  224. },
  225. "outputs": [],
  226. "source": [
  227. "# Sparse base columns.\n",
  228. "gender = tf.contrib.layers.sparse_column_with_keys(column_name=\"gender\",\n",
  229. " keys=[\"female\", \"male\"],\n",
  230. " combiner=\"sum\")\n",
  231. "state = tf.contrib.layers.sparse_column_with_hash_bucket(\"state\", hash_bucket_size=100, combiner=\"sum\")\n",
  232. "\n",
  233. "\n",
  234. "# Continuous base columns.\n",
  235. "age = tf.contrib.layers.real_valued_column(\"age\")\n",
  236. "purchase_recency = tf.contrib.layers.real_valued_column(\"purchase_recency\")\n",
  237. "purchase_frequency = tf.contrib.layers.real_valued_column(\"purchase_frequency\")\n",
  238. "monetary_value = tf.contrib.layers.real_valued_column(\"monetary_value\")\n",
  239. "interaction_recency = tf.contrib.layers.real_valued_column(\"interaction_recency\")\n",
  240. "interaction_frequency = tf.contrib.layers.real_valued_column(\"interaction_frequency\")\n",
  241. "\n",
  242. "# Transformations.\n",
  243. "age_buckets = tf.contrib.layers.bucketized_column(age,\n",
  244. " boundaries=[\n",
  245. " 10, 14, 18, 21, \n",
  246. " 25, 30, 35, 40,\n",
  247. " 50, 55, 60, 65\n",
  248. " ])\n",
  249. "age_x_gender = tf.contrib.layers.crossed_column([age_buckets, gender], hash_bucket_size=int(1e4), combiner='sum')\n",
  250. "\n",
  251. "# embeddings\n",
  252. "gender_embedding = tf.contrib.layers.embedding_column(gender, dimension=8, combiner=\"mean\")\n",
  253. "state_embedding = tf.contrib.layers.embedding_column(state, dimension=8, combiner=\"mean\")"
  254. ]
  255. },
  256. {
  257. "cell_type": "code",
  258. "execution_count": 16,
  259. "metadata": {
  260. "collapsed": false
  261. },
  262. "outputs": [
  263. {
  264. "name": "stdout",
  265. "output_type": "stream",
  266. "text": [
  267. "INFO:tensorflow:Using default config.\n",
  268. "INFO:tensorflow:Using config: {'save_summary_steps': 100, '_num_ps_replicas': 0, '_task_type': None, '_environment': 'local', '_is_chief': True, 'save_checkpoints_secs': 600, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7ff0c9f7aa10>, 'tf_config': gpu_options {\n",
  269. " per_process_gpu_memory_fraction: 1\n",
  270. "}\n",
  271. ", '_task_id': 0, 'tf_random_seed': None, 'keep_checkpoint_every_n_hours': 10000, '_evaluation_master': '', 'save_checkpoints_steps': None, '_master': '', 'keep_checkpoint_max': 5}\n",
  272. "INFO:tensorflow:Using default config.\n",
  273. "INFO:tensorflow:Using config: {'save_summary_steps': 100, '_num_ps_replicas': 0, '_task_type': None, '_environment': 'local', '_is_chief': True, 'save_checkpoints_secs': 600, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7ff0f473df90>, 'tf_config': gpu_options {\n",
  274. " per_process_gpu_memory_fraction: 1\n",
  275. "}\n",
  276. ", '_task_id': 0, 'tf_random_seed': None, 'keep_checkpoint_every_n_hours': 10000, '_evaluation_master': '', 'save_checkpoints_steps': None, '_master': '', 'keep_checkpoint_max': 5}\n",
  277. "INFO:tensorflow:Using default config.\n",
  278. "INFO:tensorflow:Using config: {'save_summary_steps': 100, '_num_ps_replicas': 0, '_task_type': None, '_environment': 'local', '_is_chief': True, 'save_checkpoints_secs': 600, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7ff0c8c94490>, 'tf_config': gpu_options {\n",
  279. " per_process_gpu_memory_fraction: 1\n",
  280. "}\n",
  281. ", '_task_id': 0, 'tf_random_seed': None, 'keep_checkpoint_every_n_hours': 10000, '_evaluation_master': '', 'save_checkpoints_steps': None, '_master': '', 'keep_checkpoint_max': 5}\n"
  282. ]
  283. }
  284. ],
  285. "source": [
  286. "wide_columns = [\n",
  287. " gender, state,\n",
  288. " purchase_recency, purchase_frequency, monetary_value,\n",
  289. " interaction_recency, interaction_frequency,\n",
  290. " age_buckets, age_x_gender\n",
  291. "]\n",
  292. "\n",
  293. "deep_columns = [\n",
  294. " gender_embedding, state_embedding,\n",
  295. " purchase_recency, purchase_frequency, monetary_value,\n",
  296. " interaction_recency, interaction_frequency\n",
  297. "]\n",
  298. "\n",
  299. "wide = tf.contrib.learn.LinearClassifier(model_dir=model_dir + '_linear',\n",
  300. " feature_columns=wide_columns,\n",
  301. " enable_centered_bias=True)\n",
  302. "\n",
  303. "deep = tf.contrib.learn.DNNClassifier(model_dir=model_dir + '_deep',\n",
  304. " feature_columns=deep_columns,\n",
  305. " hidden_units=[100, 50])\n",
  306. "\n",
  307. "hybrid = tf.contrib.learn.DNNLinearCombinedClassifier(\n",
  308. " model_dir=model_dir + '_hybrid',\n",
  309. " linear_feature_columns=wide_columns,\n",
  310. " dnn_feature_columns=deep_columns,\n",
  311. " dnn_hidden_units=[100, 50])"
  312. ]
  313. },
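{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Added sketch: the INFO lines above show each estimator falling back to a\n",
"# default RunConfig (checkpoint every 600 s, keep_checkpoint_max=5). A custom\n",
"# config can be passed instead; 'wide_custom' below is a hypothetical variable\n",
"# used only to illustrate the keyword, not part of the original experiment.\n",
"custom_config = tf.contrib.learn.RunConfig(save_checkpoints_secs=120)\n",
"wide_custom = tf.contrib.learn.LinearClassifier(model_dir=model_dir + '_linear',\n",
"                                                feature_columns=wide_columns,\n",
"                                                config=custom_config)"
]
},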
  314. {
  315. "cell_type": "code",
  316. "execution_count": 17,
  317. "metadata": {
  318. "collapsed": false,
  319. "scrolled": false
  320. },
  321. "outputs": [],
  322. "source": [
  323. "def fix_feature_spec(feature_spec):\n",
  324. " for key, feature in feature_spec.items():\n",
  325. " if isinstance(feature, tf.VarLenFeature):\n",
  326. " feature_spec[key] = tf.FixedLenFeature(shape=[1], dtype=feature.dtype, default_value=None)\n",
  327. "\n",
  328. "def train_and_export(model, feature_columns):\n",
  329. " model.fit(input_fn=lambda: input_fn(df_train), steps=1000)\n",
  330. " results = model.evaluate(input_fn=lambda: input_fn(df_test), steps=1)\n",
  331. " for key in sorted(results):\n",
  332. " print(\"%s: %s\" % (key, results[key]))\n",
  333. " \n",
  334. " feature_spec = tf.contrib.layers.create_feature_spec_for_parsing(feature_columns)\n",
  335. " fix_feature_spec(feature_spec)\n",
  336. "\n",
  337. " export_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)\n",
  338. "\n",
  339. " export_path = model.export_savedmodel(model.model_dir, export_input_fn)\n",
  340. " return export_path "
  341. ]
  342. },
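{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Added sketch: build_parsing_serving_input_fn sets the exported model up to\n",
"# receive serialized tf.train.Example protos. This illustrates what one request\n",
"# record could look like; only a few of the features are filled in, and the\n",
"# dtypes follow the parsing spec (float32 for the real-valued columns, bytes\n",
"# for the sparse string columns).\n",
"example = tf.train.Example(features=tf.train.Features(feature={\n",
"    'gender': tf.train.Feature(bytes_list=tf.train.BytesList(value=['female'])),\n",
"    'age': tf.train.Feature(float_list=tf.train.FloatList(value=[30.0])),\n",
"    'monetary_value': tf.train.Feature(float_list=tf.train.FloatList(value=[7.0])),\n",
"}))\n",
"serialized_example = example.SerializeToString()\n",
"print(len(serialized_example))"
]
},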
  343. {
  344. "cell_type": "code",
  345. "execution_count": 18,
  346. "metadata": {
  347. "collapsed": false,
  348. "scrolled": false
  349. },
  350. "outputs": [
  351. {
  352. "name": "stdout",
  353. "output_type": "stream",
  354. "text": [
  355. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py:450 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.\n",
  356. "Instructions for updating:\n",
  357. "Estimator is decoupled from Scikit Learn interface by moving into\n",
  358. "separate class SKCompat. Arguments x, y and batch_size are only\n",
  359. "available in the SKCompat class, Estimator will only accept input_fn.\n",
  360. "Example conversion:\n",
  361. " est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
  362. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py:450 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.\n",
  363. "Instructions for updating:\n",
  364. "Estimator is decoupled from Scikit Learn interface by moving into\n",
  365. "separate class SKCompat. Arguments x, y and batch_size are only\n",
  366. "available in the SKCompat class, Estimator will only accept input_fn.\n",
  367. "Example conversion:\n",
  368. " est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
  369. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py:450 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.\n",
  370. "Instructions for updating:\n",
  371. "Estimator is decoupled from Scikit Learn interface by moving into\n",
  372. "separate class SKCompat. Arguments x, y and batch_size are only\n",
  373. "available in the SKCompat class, Estimator will only accept input_fn.\n",
  374. "Example conversion:\n",
  375. " est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
  376. "DEBUG:tensorflow:Setting feature info for mode train to {'purchase_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(80000)]), is_sparse=False), 'gender': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'age': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(80000)]), is_sparse=False), 'interaction_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(80000)]), is_sparse=False), 'interaction_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(80000)]), is_sparse=False), 'state': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'monetary_value': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(80000)]), is_sparse=False), 'purchase_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(80000)]), is_sparse=False)}.\n",
  377. "DEBUG:tensorflow:Setting labels info for mode train to TensorSignature(dtype=tf.bool, shape=TensorShape([Dimension(80000)]), is_sparse=False)\n",
  378. "DEBUG:tensorflow:Transforming feature_column _BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65))\n",
  379. "DEBUG:tensorflow:Transforming feature_column _CrossedColumn(columns=(_BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65)), _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)), hash_bucket_size=10000, hash_key=None, combiner='sum', ckpt_to_load_from=None, tensor_name_in_ckpt=None)\n",
  380. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/layers/python/layers/feature_column.py:1751 in insert_transformed_feature.: calling sparse_feature_cross (from tensorflow.contrib.layers.python.ops.sparse_feature_cross_op) with hash_key=None is deprecated and will be removed after 2016-11-20.\n",
  381. "Instructions for updating:\n",
  382. "The default behavior of sparse_feature_cross is changing, the default\n",
  383. "value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n",
  384. "From that point on sparse_feature_cross will always use FingerprintCat64\n",
  385. "to concatenate the feature fingerprints. And the underlying\n",
  386. "_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n",
  387. "as deprecated.\n",
  388. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  389. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  390. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  391. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  392. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  393. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  394. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  395. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  396. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  397. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  398. "DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)\n",
  399. "DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string)\n",
  400. "INFO:tensorflow:Create CheckpointSaverHook.\n",
  401. "INFO:tensorflow:loss = 0.671348, step = 7001\n",
  402. "INFO:tensorflow:Saving checkpoints for 7001 into /models/targeting_linear/model.ckpt.\n",
  403. "WARNING:tensorflow:*******************************************************\n",
  404. "WARNING:tensorflow:TensorFlow's V1 checkpoint format has been deprecated.\n",
  405. "WARNING:tensorflow:Consider switching to the more efficient V2 format:\n",
  406. "WARNING:tensorflow: `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\n",
  407. "WARNING:tensorflow:now on by default.\n",
  408. "WARNING:tensorflow:*******************************************************\n",
  409. "INFO:tensorflow:loss = 0.670908, step = 7101\n",
  410. "INFO:tensorflow:global_step/sec: 16.8636\n",
  411. "INFO:tensorflow:loss = 0.670837, step = 7201\n",
  412. "INFO:tensorflow:global_step/sec: 21.518\n",
  413. "INFO:tensorflow:loss = 0.670809, step = 7301\n",
  414. "INFO:tensorflow:global_step/sec: 21.7738\n",
  415. "INFO:tensorflow:loss = 0.670792, step = 7401\n",
  416. "INFO:tensorflow:global_step/sec: 21.8282\n",
  417. "INFO:tensorflow:loss = 0.670781, step = 7501\n",
  418. "INFO:tensorflow:global_step/sec: 20.6731\n",
  419. "INFO:tensorflow:loss = 0.670775, step = 7601\n",
  420. "INFO:tensorflow:global_step/sec: 20.3416\n",
  421. "INFO:tensorflow:loss = 0.670768, step = 7701\n",
  422. "INFO:tensorflow:global_step/sec: 21.4195\n",
  423. "INFO:tensorflow:loss = 0.670762, step = 7801\n",
  424. "INFO:tensorflow:global_step/sec: 21.845\n",
  425. "INFO:tensorflow:loss = 0.670759, step = 7901\n",
  426. "INFO:tensorflow:global_step/sec: 21.8376\n",
  427. "INFO:tensorflow:Saving checkpoints for 8000 into /models/targeting_linear/model.ckpt.\n",
  428. "WARNING:tensorflow:*******************************************************\n",
  429. "WARNING:tensorflow:TensorFlow's V1 checkpoint format has been deprecated.\n",
  430. "WARNING:tensorflow:Consider switching to the more efficient V2 format:\n",
  431. "WARNING:tensorflow: `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\n",
  432. "WARNING:tensorflow:now on by default.\n",
  433. "WARNING:tensorflow:*******************************************************\n",
  434. "INFO:tensorflow:Loss for final step: 0.670755.\n",
  435. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py:458 in evaluate.: calling evaluate (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.\n",
  436. "Instructions for updating:\n",
  437. "Estimator is decoupled from Scikit Learn interface by moving into\n",
  438. "separate class SKCompat. Arguments x, y and batch_size are only\n",
  439. "available in the SKCompat class, Estimator will only accept input_fn.\n",
  440. "Example conversion:\n",
  441. " est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
  442. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py:458 in evaluate.: calling evaluate (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.\n",
  443. "Instructions for updating:\n",
  444. "Estimator is decoupled from Scikit Learn interface by moving into\n",
  445. "separate class SKCompat. Arguments x, y and batch_size are only\n",
  446. "available in the SKCompat class, Estimator will only accept input_fn.\n",
  447. "Example conversion:\n",
  448. " est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
  449. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/linear.py:458 in evaluate.: calling evaluate (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.\n",
  450. "Instructions for updating:\n",
  451. "Estimator is decoupled from Scikit Learn interface by moving into\n",
  452. "separate class SKCompat. Arguments x, y and batch_size are only\n",
  453. "available in the SKCompat class, Estimator will only accept input_fn.\n",
  454. "Example conversion:\n",
  455. " est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
  456. "DEBUG:tensorflow:Setting feature info for mode eval to {'purchase_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(20000)]), is_sparse=False), 'gender': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'age': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(20000)]), is_sparse=False), 'interaction_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(20000)]), is_sparse=False), 'interaction_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(20000)]), is_sparse=False), 'state': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'monetary_value': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(20000)]), is_sparse=False), 'purchase_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(20000)]), is_sparse=False)}.\n",
  457. "DEBUG:tensorflow:Setting labels info for mode eval to TensorSignature(dtype=tf.bool, shape=TensorShape([Dimension(20000)]), is_sparse=False)\n",
  458. "DEBUG:tensorflow:Transforming feature_column _BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65))\n",
  459. "DEBUG:tensorflow:Transforming feature_column _CrossedColumn(columns=(_BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65)), _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)), hash_bucket_size=10000, hash_key=None, combiner='sum', ckpt_to_load_from=None, tensor_name_in_ckpt=None)\n",
  460. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/layers/python/layers/feature_column.py:1751 in insert_transformed_feature.: calling sparse_feature_cross (from tensorflow.contrib.layers.python.ops.sparse_feature_cross_op) with hash_key=None is deprecated and will be removed after 2016-11-20.\n",
  461. "Instructions for updating:\n",
  462. "The default behavior of sparse_feature_cross is changing, the default\n",
  463. "value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n",
  464. "From that point on sparse_feature_cross will always use FingerprintCat64\n",
  465. "to concatenate the feature fingerprints. And the underlying\n",
  466. "_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n",
  467. "as deprecated.\n",
  468. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  469. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  470. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  471. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  472. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  473. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  474. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  475. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  476. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  477. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  478. "DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)\n",
  479. "DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string)\n",
  480. "INFO:tensorflow:Restored model from /models/targeting_linear\n",
  481. "INFO:tensorflow:Eval steps [0,1) for training step 8000.\n",
  482. "INFO:tensorflow:Saving evaluation summary for step 8000: accuracy = 0.57405, accuracy/baseline_label_mean = 0.4257, accuracy/threshold_0.500000_mean = 0.57405, auc = 0.589328, labels/actual_label_mean = 0.4257, labels/prediction_mean = 0.429727, loss = 0.670349, precision/positive_threshold_0.500000_mean = 0.499326, recall/positive_threshold_0.500000_mean = 0.217642\n",
  483. "accuracy: 0.57405\n",
  484. "accuracy/baseline_label_mean: 0.4257\n",
  485. "accuracy/threshold_0.500000_mean: 0.57405\n",
  486. "auc: 0.589328\n",
  487. "global_step: 8000\n",
  488. "labels/actual_label_mean: 0.4257\n",
  489. "labels/prediction_mean: 0.429727\n",
  490. "loss: 0.670349\n",
  491. "precision/positive_threshold_0.500000_mean: 0.499326\n",
  492. "recall/positive_threshold_0.500000_mean: 0.217642\n",
  493. "WARNING:tensorflow:export_savedmodel (from tensorflow.contrib.learn.python.learn.estimators.linear) is experimental and may change or be removed at any time, and without warning.\n",
  494. "WARNING:tensorflow:export_savedmodel (from tensorflow.contrib.learn.python.learn.estimators.estimator) is experimental and may change or be removed at any time, and without warning.\n",
  495. "DEBUG:tensorflow:Transforming feature_column _BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65))\n",
  496. "DEBUG:tensorflow:Transforming feature_column _CrossedColumn(columns=(_BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65)), _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)), hash_bucket_size=10000, hash_key=None, combiner='sum', ckpt_to_load_from=None, tensor_name_in_ckpt=None)\n",
  497. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/layers/python/layers/feature_column.py:1751 in insert_transformed_feature.: calling sparse_feature_cross (from tensorflow.contrib.layers.python.ops.sparse_feature_cross_op) with hash_key=None is deprecated and will be removed after 2016-11-20.\n",
  498. "Instructions for updating:\n",
  499. "The default behavior of sparse_feature_cross is changing, the default\n",
  500. "value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n",
  501. "From that point on sparse_feature_cross will always use FingerprintCat64\n",
  502. "to concatenate the feature fingerprints. And the underlying\n",
  503. "_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n",
  504. "as deprecated.\n",
  505. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  506. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  507. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  508. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  509. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  510. "DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)\n",
  511. "DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string)\n",
  512. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py:1203 in export_savedmodel.: initialize_local_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.\n",
  513. "Instructions for updating:\n",
  514. "Use `tf.local_variables_initializer` instead.\n",
  515. "INFO:tensorflow:Assets added to graph.\n",
  516. "INFO:tensorflow:No assets to write.\n",
  517. "INFO:tensorflow:SavedModel written to: /models/targeting_linear/1481461170903/saved_model.pb\n"
  518. ]
  519. },
  520. {
  521. "data": {
  522. "text/plain": [
  523. "'/models/targeting_linear/1481461170903'"
  524. ]
  525. },
  526. "execution_count": 18,
  527. "metadata": {},
  528. "output_type": "execute_result"
  529. }
  530. ],
  531. "source": [
  532. "train_and_export(wide, wide_columns)"
  533. ]
  534. },
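{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Added sketch: pull class predictions for the test set from the trained linear\n",
"# model through the same input_fn. as_iterable=False asks the contrib.learn API\n",
"# of this TF version for a plain in-memory array rather than a generator.\n",
"wide_predictions = wide.predict(input_fn=lambda: input_fn(df_test), as_iterable=False)\n",
"print(wide_predictions[:10])"
]
},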
  535. {
  536. "cell_type": "code",
  537. "execution_count": 10,
  538. "metadata": {
  539. "collapsed": false,
  540. "scrolled": true
  541. },
  542. "outputs": [
  543. {
  544. "name": "stdout",
  545. "output_type": "stream",
  546. "text": [
  547. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn.py:340 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.\n",
  548. "Instructions for updating:\n",
  549. "Estimator is decoupled from Scikit Learn interface by moving into\n",
  550. "separate class SKCompat. Arguments x, y and batch_size are only\n",
  551. "available in the SKCompat class, Estimator will only accept input_fn.\n",
  552. "Example conversion:\n",
  553. " est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
  554. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn.py:340 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.\n",
  555. "Instructions for updating:\n",
  556. "Estimator is decoupled from Scikit Learn interface by moving into\n",
  557. "separate class SKCompat. Arguments x, y and batch_size are only\n",
  558. "available in the SKCompat class, Estimator will only accept input_fn.\n",
  559. "Example conversion:\n",
  560. " est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
  561. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn.py:340 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.\n",
  562. "Instructions for updating:\n",
  563. "Estimator is decoupled from Scikit Learn interface by moving into\n",
  564. "separate class SKCompat. Arguments x, y and batch_size are only\n",
  565. "available in the SKCompat class, Estimator will only accept input_fn.\n",
  566. "Example conversion:\n",
  567. " est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
  568. "DEBUG:tensorflow:Setting feature info for mode train to {'purchase_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'gender': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'age': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'interaction_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'interaction_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'state': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'monetary_value': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'purchase_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False)}.\n",
  569. "DEBUG:tensorflow:Setting labels info for mode train to TensorSignature(dtype=tf.bool, shape=TensorShape([Dimension(100000)]), is_sparse=False)\n",
  570. "DEBUG:tensorflow:Transforming feature_column _EmbeddingColumn(sparse_id_column=_SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string), dimension=8, combiner='mean', initializer=<function _initializer at 0x7f20563876e0>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None)\n",
  571. "DEBUG:tensorflow:Transforming feature_column _EmbeddingColumn(sparse_id_column=_SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string), dimension=8, combiner='mean', initializer=<function _initializer at 0x7f2056387500>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None)\n",
  572. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  573. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  574. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  575. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  576. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  577. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  578. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  579. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  580. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  581. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  582. "INFO:tensorflow:Create CheckpointSaverHook.\n",
  583. "INFO:tensorflow:loss = 1.43177, step = 2001\n",
  584. "INFO:tensorflow:Saving checkpoints for 2001 into /models/targeting_deep/model.ckpt.\n",
  585. "WARNING:tensorflow:*******************************************************\n",
  586. "WARNING:tensorflow:TensorFlow's V1 checkpoint format has been deprecated.\n",
  587. "WARNING:tensorflow:Consider switching to the more efficient V2 format:\n",
  588. "WARNING:tensorflow: `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\n",
  589. "WARNING:tensorflow:now on by default.\n",
  590. "WARNING:tensorflow:*******************************************************\n",
  591. "INFO:tensorflow:loss = 0.667609, step = 2101\n",
  592. "INFO:tensorflow:global_step/sec: 1.84492\n",
  593. "INFO:tensorflow:loss = 0.665916, step = 2201\n",
  594. "INFO:tensorflow:global_step/sec: 1.73343\n",
  595. "INFO:tensorflow:loss = 0.664725, step = 2301\n",
  596. "INFO:tensorflow:global_step/sec: 1.62747\n",
  597. "INFO:tensorflow:loss = 0.665076, step = 2401\n",
  598. "INFO:tensorflow:global_step/sec: 1.24979\n",
  599. "INFO:tensorflow:loss = 0.663669, step = 2501\n",
  600. "INFO:tensorflow:global_step/sec: 1.50628\n",
  601. "INFO:tensorflow:loss = 0.66299, step = 2601\n",
  602. "INFO:tensorflow:global_step/sec: 1.34333\n",
  603. "INFO:tensorflow:loss = 0.66246, step = 2701\n",
  604. "INFO:tensorflow:global_step/sec: 1.23336\n",
  605. "INFO:tensorflow:loss = 0.661846, step = 2801\n",
  606. "INFO:tensorflow:global_step/sec: 1.50963\n",
  607. "INFO:tensorflow:Saving checkpoints for 2867 into /models/targeting_deep/model.ckpt.\n",
  608. "WARNING:tensorflow:*******************************************************\n",
  609. "WARNING:tensorflow:TensorFlow's V1 checkpoint format has been deprecated.\n",
  610. "WARNING:tensorflow:Consider switching to the more efficient V2 format:\n",
  611. "WARNING:tensorflow: `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\n",
  612. "WARNING:tensorflow:now on by default.\n",
  613. "WARNING:tensorflow:*******************************************************\n",
  614. "INFO:tensorflow:loss = 0.661311, step = 2901\n",
  615. "INFO:tensorflow:global_step/sec: 1.07092\n",
  616. "INFO:tensorflow:Saving checkpoints for 3000 into /models/targeting_deep/model.ckpt.\n",
  617. "WARNING:tensorflow:*******************************************************\n",
  618. "WARNING:tensorflow:TensorFlow's V1 checkpoint format has been deprecated.\n",
  619. "WARNING:tensorflow:Consider switching to the more efficient V2 format:\n",
  620. "WARNING:tensorflow: `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\n",
  621. "WARNING:tensorflow:now on by default.\n",
  622. "WARNING:tensorflow:*******************************************************\n",
  623. "INFO:tensorflow:Loss for final step: 0.660779.\n",
  624. "WARNING:tensorflow:export_savedmodel (from tensorflow.contrib.learn.python.learn.estimators.dnn) is experimental and may change or be removed at any time, and without warning.\n",
  625. "WARNING:tensorflow:export_savedmodel (from tensorflow.contrib.learn.python.learn.estimators.estimator) is experimental and may change or be removed at any time, and without warning.\n",
  626. "DEBUG:tensorflow:Transforming feature_column _EmbeddingColumn(sparse_id_column=_SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string), dimension=8, combiner='mean', initializer=<function _initializer at 0x7f20563876e0>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None)\n",
  627. "DEBUG:tensorflow:Transforming feature_column _EmbeddingColumn(sparse_id_column=_SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string), dimension=8, combiner='mean', initializer=<function _initializer at 0x7f2056387500>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None)\n",
  628. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  629. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  630. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  631. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  632. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  633. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py:1203 in export_savedmodel.: initialize_local_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.\n",
  634. "Instructions for updating:\n",
  635. "Use `tf.local_variables_initializer` instead.\n",
  636. "INFO:tensorflow:Assets added to graph.\n",
  637. "INFO:tensorflow:No assets to write.\n",
  638. "INFO:tensorflow:SavedModel written to: /models/targeting_deep/1481293854053/saved_model.pb\n"
  639. ]
  640. },
  641. {
  642. "data": {
  643. "text/plain": [
  644. "'/models/targeting_deep/1481293854053'"
  645. ]
  646. },
  647. "execution_count": 10,
  648. "metadata": {},
  649. "output_type": "execute_result"
  650. }
  651. ],
  652. "source": [
  653. "train_and_export(deep, deep_columns)"
  654. ]
  655. },
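{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Added sketch: re-load one of the exported SavedModels and list its signatures.\n",
"# The loader lives under tensorflow.python.saved_model in this TF 0.12-era\n",
"# build; substitute the export path actually returned by train_and_export above.\n",
"from tensorflow.python.saved_model import loader, tag_constants\n",
"\n",
"deep_export_path = '/models/targeting_deep/1481293854053'  # path printed above\n",
"with tf.Session(graph=tf.Graph()) as sess:\n",
"    meta_graph = loader.load(sess, [tag_constants.SERVING], deep_export_path)\n",
"    print(meta_graph.signature_def.keys())"
]
},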
  656. {
  657. "cell_type": "code",
  658. "execution_count": 11,
  659. "metadata": {
  660. "collapsed": false,
  661. "scrolled": false
  662. },
  663. "outputs": [
  664. {
  665. "name": "stdout",
  666. "output_type": "stream",
  667. "text": [
  668. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:751 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with y is deprecated and will be removed after 2016-12-01.\n",
  669. "Instructions for updating:\n",
  670. "Estimator is decoupled from Scikit Learn interface by moving into\n",
  671. "separate class SKCompat. Arguments x, y and batch_size are only\n",
  672. "available in the SKCompat class, Estimator will only accept input_fn.\n",
  673. "Example conversion:\n",
  674. " est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
  675. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:751 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with x is deprecated and will be removed after 2016-12-01.\n",
  676. "Instructions for updating:\n",
  677. "Estimator is decoupled from Scikit Learn interface by moving into\n",
  678. "separate class SKCompat. Arguments x, y and batch_size are only\n",
  679. "available in the SKCompat class, Estimator will only accept input_fn.\n",
  680. "Example conversion:\n",
  681. " est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
  682. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:751 in fit.: calling fit (from tensorflow.contrib.learn.python.learn.estimators.estimator) with batch_size is deprecated and will be removed after 2016-12-01.\n",
  683. "Instructions for updating:\n",
  684. "Estimator is decoupled from Scikit Learn interface by moving into\n",
  685. "separate class SKCompat. Arguments x, y and batch_size are only\n",
  686. "available in the SKCompat class, Estimator will only accept input_fn.\n",
  687. "Example conversion:\n",
  688. " est = Estimator(...) -> est = SKCompat(Estimator(...))\n",
  689. "DEBUG:tensorflow:Setting feature info for mode train to {'purchase_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'gender': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'age': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'interaction_recency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'interaction_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'state': TensorSignature(dtype=tf.string, shape=None, is_sparse=True), 'monetary_value': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False), 'purchase_frequency': TensorSignature(dtype=tf.int64, shape=TensorShape([Dimension(100000)]), is_sparse=False)}.\n",
  690. "DEBUG:tensorflow:Setting labels info for mode train to TensorSignature(dtype=tf.bool, shape=TensorShape([Dimension(100000)]), is_sparse=False)\n",
  691. "DEBUG:tensorflow:Transforming feature_column _EmbeddingColumn(sparse_id_column=_SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string), dimension=8, combiner='mean', initializer=<function _initializer at 0x7f20563876e0>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None)\n",
  692. "DEBUG:tensorflow:Transforming feature_column _EmbeddingColumn(sparse_id_column=_SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string), dimension=8, combiner='mean', initializer=<function _initializer at 0x7f2056387500>, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None)\n",
  693. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  694. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  695. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  696. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  697. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  698. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  699. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  700. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  701. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  702. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  703. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:366 in _add_hidden_layer_summary.: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
  704. "Instructions for updating:\n",
  705. "Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.\n",
  706. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:367 in _add_hidden_layer_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
  707. "Instructions for updating:\n",
  708. "Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.\n",
  709. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:366 in _add_hidden_layer_summary.: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
  710. "Instructions for updating:\n",
  711. "Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.\n",
  712. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:367 in _add_hidden_layer_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
  713. "Instructions for updating:\n",
  714. "Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.\n",
  715. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:366 in _add_hidden_layer_summary.: scalar_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
  716. "Instructions for updating:\n",
  717. "Please switch to tf.summary.scalar. Note that tf.summary.scalar uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, passing a tensor or list of tags to a scalar summary op is no longer supported.\n",
  718. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.py:367 in _add_hidden_layer_summary.: histogram_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.\n",
  719. "Instructions for updating:\n",
  720. "Please switch to tf.summary.histogram. Note that tf.summary.histogram uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on their scope.\n",
  721. "DEBUG:tensorflow:Transforming feature_column _BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65))\n",
  722. "DEBUG:tensorflow:Transforming feature_column _CrossedColumn(columns=(_BucketizedColumn(source_column=_RealValuedColumn(column_name='age', dimension=1, default_value=None, dtype=tf.float32, normalizer=None), boundaries=(10, 14, 18, 21, 25, 30, 35, 40, 50, 55, 60, 65)), _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)), hash_bucket_size=10000, hash_key=None, combiner='sum', ckpt_to_load_from=None, tensor_name_in_ckpt=None)\n",
  723. "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensorflow/contrib/layers/python/layers/feature_column.py:1751 in insert_transformed_feature.: calling sparse_feature_cross (from tensorflow.contrib.layers.python.ops.sparse_feature_cross_op) with hash_key=None is deprecated and will be removed after 2016-11-20.\n",
  724. "Instructions for updating:\n",
  725. "The default behavior of sparse_feature_cross is changing, the default\n",
  726. "value for hash_key will change to SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY.\n",
  727. "From that point on sparse_feature_cross will always use FingerprintCat64\n",
  728. "to concatenate the feature fingerprints. And the underlying\n",
  729. "_sparse_feature_cross_op.sparse_feature_cross operation will be marked\n",
  730. "as deprecated.\n",
  731. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  732. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  733. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='interaction_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  734. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  735. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='monetary_value', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  736. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  737. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_frequency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  738. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  739. "DEBUG:tensorflow:Transforming feature_column _RealValuedColumn(column_name='purchase_recency', dimension=1, default_value=None, dtype=tf.float32, normalizer=None)\n",
  740. "WARNING:tensorflow:Rank of input Tensor (1) should be the same as output_rank (2) for column. Will attempt to expand dims. It is highly recommended that you resize your input, as this behavior may change.\n",
  741. "DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='gender', is_integerized=False, bucket_size=None, lookup_config=_SparseIdLookupConfig(vocabulary_file=None, keys=('female', 'male'), num_oov_buckets=0, vocab_size=2, default_value=-1), combiner='sum', dtype=tf.string)\n",
  742. "DEBUG:tensorflow:Transforming feature_column _SparseColumn(column_name='state', is_integerized=False, bucket_size=100, lookup_config=None, combiner='sum', dtype=tf.string)\n",
  743. "INFO:tensorflow:Create CheckpointSaverHook.\n",
  744. "INFO:tensorflow:loss = 1.36845, step = 1006\n",
  745. "INFO:tensorflow:Saving checkpoints for 1006 into /models/targeting_hybrid/model.ckpt.\n",
  746. "WARNING:tensorflow:*******************************************************\n",
  747. "WARNING:tensorflow:TensorFlow's V1 checkpoint format has been deprecated.\n",
  748. "WARNING:tensorflow:Consider switching to the more efficient V2 format:\n",
  749. "WARNING:tensorflow: `tf.train.Saver(write_version=tf.train.SaverDef.V2)`\n",
  750. "WARNING:tensorflow:now on by default.\n",
  751. "WARNING:tensorflow:*******************************************************\n"
  752. ]
  753. },
  754. {
  755. "ename": "KeyboardInterrupt",
  756. "evalue": "",
  757. "output_type": "error",
  758. "traceback": [
  759. "\u001b[0;31m\u001b[0m",
  760. "\u001b[0;31mKeyboardInterrupt\u001b[0mTraceback (most recent call last)",
  761. "\u001b[0;32m<ipython-input-11-273a6ca1390a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtrain_and_export\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mhybrid\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwide_columns\u001b[0m \u001b[0;34m+\u001b[0m \u001b[0mdeep_columns\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
  762. "\u001b[0;32m<ipython-input-8-f2badeb6cc49>\u001b[0m in \u001b[0;36mtrain_and_export\u001b[0;34m(model, feature_columns)\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtrain_and_export\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeature_columns\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minput_fn\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mlambda\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0minput_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdf_train\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msteps\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1000\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0mfeature_spec\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcontrib\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlayers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcreate_feature_spec_for_parsing\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfeature_columns\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  763. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined.pyc\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, input_fn, steps, batch_size, monitors, max_steps)\u001b[0m\n\u001b[1;32m 749\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 750\u001b[0m \u001b[0mmonitors\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhooks\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 751\u001b[0;31m max_steps=max_steps)\n\u001b[0m\u001b[1;32m 752\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 753\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
  764. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/util/deprecation.pyc\u001b[0m in \u001b[0;36mnew_func\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 245\u001b[0m \u001b[0m_call_location\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdecorator_utils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_qualified_name\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfunc\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 246\u001b[0m func.__module__, arg_name, date, instructions)\n\u001b[0;32m--> 247\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 248\u001b[0m new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(\n\u001b[1;32m 249\u001b[0m func.__doc__, date, instructions)\n",
  765. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, x, y, input_fn, steps, batch_size, monitors, max_steps)\u001b[0m\n\u001b[1;32m 362\u001b[0m \u001b[0msteps\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msteps\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 363\u001b[0m \u001b[0mmonitors\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmonitors\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 364\u001b[0;31m max_steps=max_steps)\n\u001b[0m\u001b[1;32m 365\u001b[0m \u001b[0mlogging\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Loss for final step: %s.'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 366\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  766. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc\u001b[0m in \u001b[0;36m_train_model\u001b[0;34m(self, input_fn, steps, feed_fn, init_op, init_feed_fn, init_fn, device_fn, monitors, log_every_steps, fail_on_nan_loss, max_steps)\u001b[0m\n\u001b[1;32m 739\u001b[0m \u001b[0mfail_on_nan_loss\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfail_on_nan_loss\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 740\u001b[0m \u001b[0mhooks\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhooks\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 741\u001b[0;31m max_steps=max_steps)\n\u001b[0m\u001b[1;32m 742\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 743\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extract_metric_update_ops\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0meval_dict\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  767. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/graph_actions.pyc\u001b[0m in \u001b[0;36m_monitored_train\u001b[0;34m(graph, output_dir, train_op, loss_op, global_step_tensor, init_op, init_feed_dict, init_fn, log_every_steps, supervisor_is_chief, supervisor_master, supervisor_save_model_secs, supervisor_save_model_steps, keep_checkpoint_max, supervisor_save_summaries_secs, supervisor_save_summaries_steps, feed_fn, steps, fail_on_nan_loss, hooks, max_steps)\u001b[0m\n\u001b[1;32m 299\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0msuper_sess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshould_stop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 300\u001b[0m _, loss = super_sess.run([train_op, loss_op], feed_fn() if feed_fn else\n\u001b[0;32m--> 301\u001b[0;31m None)\n\u001b[0m\u001b[1;32m 302\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 303\u001b[0m \u001b[0msummary_io\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mSummaryWriterCache\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclear\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  768. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/monitored_session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 471\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 472\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 473\u001b[0;31m run_metadata=run_metadata)\n\u001b[0m\u001b[1;32m 474\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 475\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mshould_stop\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  769. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/monitored_session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 626\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 627\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 628\u001b[0;31m run_metadata=run_metadata)\n\u001b[0m\u001b[1;32m 629\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mAbortedError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 630\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  770. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/monitored_session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 593\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 594\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 595\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_sess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 596\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 597\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
  771. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/monitored_session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 727\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 728\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 729\u001b[0;31m run_metadata=run_metadata)\n\u001b[0m\u001b[1;32m 730\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 731\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_hooks\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  772. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/monitored_session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 593\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 594\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 595\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_sess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 596\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 597\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
  773. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 765\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 766\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 767\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 768\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 769\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  774. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 963\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 964\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m--> 965\u001b[0;31m feed_dict_string, options, run_metadata)\n\u001b[0m\u001b[1;32m 966\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 967\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  775. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1013\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1014\u001b[0m return self._do_call(_run_fn, self._session, feed_dict, fetch_list,\n\u001b[0;32m-> 1015\u001b[0;31m target_list, options, run_metadata)\n\u001b[0m\u001b[1;32m 1016\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1017\u001b[0m return self._do_call(_prun_fn, self._session, handle, feed_dict,\n",
  776. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1020\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1021\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1022\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1023\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1024\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcompat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mas_text\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  777. "\u001b[0;32m/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.pyc\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(session, feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1002\u001b[0m return tf_session.TF_Run(session, options,\n\u001b[1;32m 1003\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1004\u001b[0;31m status, run_metadata)\n\u001b[0m\u001b[1;32m 1005\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1006\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_prun_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msession\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
  778. "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
  779. ]
  780. }
  781. ],
  782. "source": [
  783. "train_and_export(hybrid, wide_columns + deep_columns)"
  784. ]
  785. },
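{
"cell_type": "markdown",
"metadata": {},
"source": [
"The run above was stopped with a `KeyboardInterrupt` just after the first checkpoint (step 1006), so the export half of `train_and_export` never ran. The deprecation warnings in the log point to two migrations: `scalar_summary`/`histogram_summary` to `tf.summary.scalar`/`tf.summary.histogram`, and the V1 checkpoint format to V2 via `tf.train.Saver(write_version=tf.train.SaverDef.V2)`.\n",
"\n",
"The next cell is only a minimal sketch of those replacement calls on a throwaway graph; the `demo_*` names and the `/tmp` path are placeholders, not part of the targeting model."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Minimal sketch of the replacement APIs named in the deprecation warnings above.\n",
"# Everything here (graph, variable, /tmp path) is a placeholder, not the model;\n",
"# it reuses the `tf` import from the first cell.\n",
"demo_graph = tf.Graph()\n",
"with demo_graph.as_default():\n",
"    demo_loss = tf.Variable(0.0, name='demo_loss')\n",
"\n",
"    # tf.summary.scalar / tf.summary.histogram replace the deprecated\n",
"    # scalar_summary / histogram_summary ops flagged in the log.\n",
"    tf.summary.scalar('demo_loss', demo_loss)\n",
"    tf.summary.histogram('demo_loss_hist', demo_loss)\n",
"\n",
"    # write_version=tf.train.SaverDef.V2 opts into the V2 checkpoint format\n",
"    # suggested by the saver warning above.\n",
"    saver = tf.train.Saver(write_version=tf.train.SaverDef.V2)\n",
"\n",
"    with tf.Session() as sess:\n",
"        sess.run(tf.global_variables_initializer())\n",
"        print(saver.save(sess, '/tmp/v2_checkpoint_demo.ckpt'))"
]
},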
  786. {
  787. "cell_type": "code",
  788. "execution_count": null,
  789. "metadata": {
  790. "collapsed": true
  791. },
  792. "outputs": [],
  793. "source": []
  794. }
  795. ],
  796. "metadata": {
  797. "kernelspec": {
  798. "display_name": "Python 2",
  799. "language": "python",
  800. "name": "python2"
  801. },
  802. "language_info": {
  803. "codemirror_mode": {
  804. "name": "ipython",
  805. "version": 2
  806. },
  807. "file_extension": ".py",
  808. "mimetype": "text/x-python",
  809. "name": "python",
  810. "nbconvert_exporter": "python",
  811. "pygments_lexer": "ipython2",
  812. "version": "2.7.6"
  813. }
  814. },
  815. "nbformat": 4,
  816. "nbformat_minor": 1
  817. }