Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
def train(trainDB, testDB, n_iter, batch_size, evaluate_every, test_size,
          loss_every):
    """Train the module-level triplet network for ``n_iter`` iterations.

    Args:
        trainDB: dataset object providing ``getTripletTrainData(batch_size)``.
        testDB: dataset passed to the periodic one-shot evaluation.
        n_iter: total number of training iterations.
        batch_size: number of triplets per training batch.
        evaluate_every: run a one-shot evaluation every this many iterations.
        test_size: number of trials per one-shot evaluation.
        loss_every: print the training loss every this many iterations.

    Side effects: uses the module-level ``tripletNet`` model and
    ``weights_path``; saves weights whenever validation accuracy improves.
    """
    print("Starting training process!")
    print("-------------------------------------")
    best = -1
    t_start = time.time()
    # BUG FIX: getTripletTrainData returns a generator, and Keras'
    # Model.fit requires numpy arrays -- passing the generator raised
    # "AttributeError: 'generator' object has no attribute 'ndim'".
    # Materialize it into a list of arrays up front.
    # NOTE(review): drawing a *fresh* batch inside the loop may be the
    # real intent; confirm against getTripletTrainData's semantics.
    inputs = [np.asarray(x) for x in trainDB.getTripletTrainData(batch_size)]
    targets = np.ones([batch_size])
    for i in range(0, n_iter):
        loss = tripletNet.fit(inputs, targets)
        # NOTE(review): stock keras Model.fit returns a History object,
        # not a scalar loss -- if tripletNet is a plain keras Model this
        # should be loss.history['loss'][-1]; confirm tripletNet's API.
        if i % evaluate_every == 0:
            print("Time for {0} iterations: {1}".format(i, time.time() - t_start))
            # BUG FIX: this is a free function with no ``self`` (the call
            # above already uses the bare module-level ``tripletNet``),
            # so ``self.test_oneshot`` / ``self.tripletNet`` would raise
            # NameError -- use the module-level names consistently.
            val_acc = test_oneshot(testDB, test_size)
            if val_acc > best:
                print("Current best: {0}, previous best: {1}".format(val_acc, best))
                # BUG FIX: "{0} n" printed a literal ' n'; the escaped
                # newline "\n" was clearly intended.
                print("Saving weights to: {0}\n".format(weights_path))
                tripletNet.save_weights(weights_path)
                best = val_acc
        if i % loss_every == 0:
            print("iteration {}, training loss: {:.2f},".format(i, loss))
- Starting training process!
- -------------------------------------
- ---------------------------------------------------------------------------
- AttributeError Traceback (most recent call last)
- <ipython-input-163-b5442e61de2d> in <module>()
- ----> 1 train(trainDatabase, testDatabase, n_iter, batch_size, evaluate_every, test_size, loss_every)
- 5 frames
- <ipython-input-161-f417f0ebcfc7> in train(trainDB, testDB, n_iter, batch_size, evaluate_every, test_size, loss_every)
- 10
- 11 for i in range(0, n_iter):
- ---> 12 loss=tripletNet.fit(inputs, targets)
- 13
- 14 #print("Loss: {0}".format(loss))
- /usr/local/lib/python3.6/dist-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
- 950 sample_weight=sample_weight,
- 951 class_weight=class_weight,
- --> 952 batch_size=batch_size)
- 953 # Prepare validation data.
- 954 do_validation = False
- /usr/local/lib/python3.6/dist-packages/keras/engine/training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)
- 749 feed_input_shapes,
- 750 check_batch_axis=False, # Don't enforce the batch size.
- --> 751 exception_prefix='input')
- 752
- 753 if y is not None:
- /usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
- 90 data = data.values if data.__class__.__name__ == 'DataFrame' else data
- 91 data = [data]
- ---> 92 data = [standardize_single_array(x) for x in data]
- 93
- 94 if len(data) != len(names):
- /usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py in <listcomp>(.0)
- 90 data = data.values if data.__class__.__name__ == 'DataFrame' else data
- 91 data = [data]
- ---> 92 data = [standardize_single_array(x) for x in data]
- 93
- 94 if len(data) != len(names):
- /usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py in standardize_single_array(x)
- 25 'Got tensor with shape: %s' % str(shape))
- 26 return x
- ---> 27 elif x.ndim == 1:
- 28 x = np.expand_dims(x, 1)
- 29 return x
- **AttributeError: 'generator' object has no attribute 'ndim'**
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement