Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import cv2
- import numpy as np
- import os
- from random import shuffle
- from tqdm import tqdm
- import tensorflow as tf
- import tflearn
- import itertools
- from tflearn.layers.conv import conv_2d, max_pool_2d
- from tflearn.layers.core import input_data, dropout, fully_connected
- from tflearn.layers.estimator import regression
- TRAIN_DIR = '/Users/xyz/Desktop/train'
- TEST_DIR = '/Users/xyz/Desktop/test'
- IMG_SIZE = 50
- LR = 1e-3
- MODEL_NAME = 'abcvsdef-{}-{}.model'.format(LR, '2conv-basic') # just so we remember which saved model is which, sizes must match
- def label_img(img):
- word_label = img.split('.')[-3]
- # conversion to one-hot array [cat,dog]
- if word_label == 'm':
- return [1,0]
- elif word_label == 'n':
- return [0,1]
- # process the training imnages and their labels into arrays
- def create_train_data():
- training_data = []
- #for img in tqdm(os.listdir(TRAIN_DIR)):
- for root, dirs, files in os.walk(TRAIN_DIR):
- for file1, file2 in itertools.izip_longest(files[::2], files[1::2]):
- img1 = cv2.imread(root + '/' + file1)
- img2 = cv2.imread(root + '/' + file2)
- img1 = cv2.resize(img1,(IMG_SIZE,IMG_SIZE))
- img2 = cv2.resize(img2,(IMG_SIZE,IMG_SIZE))
- image_pairs = np.concatenate((img1, img2), axis=1)
- label = label_img(file1)
- training_data.append([np.array(image_pairs),np.array(label)])
- return training_data
- # train the data
- train_data = create_train_data()
- # construct the CNN
- convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 1], name='input')
- convnet = conv_2d(convnet, 32, 5, activation='relu')
- convnet = max_pool_2d(convnet, 5)
- convnet = conv_2d(convnet, 64, 5, activation='relu')
- convnet = max_pool_2d(convnet, 5)
- convnet = conv_2d(convnet, 128, 5, activation='relu')
- convnet = max_pool_2d(convnet, 5)
- convnet = conv_2d(convnet, 64, 5, activation='relu')
- convnet = max_pool_2d(convnet, 5)
- convnet = conv_2d(convnet, 32, 5, activation='relu')
- convnet = max_pool_2d(convnet, 5)
- convnet = fully_connected(convnet, 1024, activation='relu')
- convnet = dropout(convnet, 0.8)
- convnet = fully_connected(convnet, 2, activation='softmax')
- convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
- model = tflearn.DNN(convnet, tensorboard_dir='log')
- train = train_data[:4]
- test = train_data[4:]
- X = np.array([i[0] for i in train]).reshape(-1,IMG_SIZE,IMG_SIZE,1)
- Y = [i[1] for i in train]
- test_x = np.array([i[0] for i in test]).reshape(-1,IMG_SIZE,IMG_SIZE,1)
- test_y = [i[1] for i in test]
- model.fit({'input': X}, {'targets': Y}, n_epoch=10, validation_set=({'input': test_x}, {'targets': test_y}),
- snapshot_step=500, show_metric=True, run_id=MODEL_NAME)
- model.save(MODEL_NAME)
- ---------------------------------
- Run id: abcvsdef-0.001-2conv-basic.model
- Log directory: log/
- ---------------------------------
- Training samples: 24
- Validation samples: 24
- --
- Exception in thread Thread-3:
- Traceback (most recent call last):
- File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 810, in __bootstrap_inner
- self.run()
- File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 763, in run
- self.__target(*self.__args, **self.__kwargs)
- File "/Library/Python/2.7/site-packages/tflearn/data_flow.py", line 187, in fill_feed_dict_queue
- data = self.retrieve_data(batch_ids)
- File "/Library/Python/2.7/site-packages/tflearn/data_flow.py", line 222, in retrieve_data
- utils.slice_array(self.feed_dict[key], batch_ids)
- File "/Library/Python/2.7/site-packages/tflearn/utils.py", line 187, in slice_array
- return X[start]
- IndexError: index 19 is out of bounds for axis 0 with size 4
Add Comment
Please, Sign In to add comment