Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
- # Create a model and train a GAN which can generate images from a phrase
def word2image(self, text, idx_top=10):
    """Produce a 64x64 single-channel image tensor for *text*.

    Parameters
    ----------
    text : str
        Phrase to rasterize onto the image.
    idx_top : int, optional
        Kept for backward compatibility; presumably the number of top
        character indices to use — TODO confirm with the caller.

    Returns
    -------
    tf.Tensor
        Float tensor of shape (1, 64, 64).
    """
    # Start from small random integer noise; range [0, 16) keeps memory low.
    image = np.random.randint(0, 16, (1, 64, 64)).astype('float32')
    # BUG FIX: tf.image.resize_images was removed from TF 2.x and the
    # original passed an invalid 3-tuple size; resize() wants (height,
    # width) and an input with a trailing channel axis.
    image = tf.image.resize(image[..., tf.newaxis], (64, 64))
    # NOTE(review): the original called tf.keras.preprocessing.text.image,
    # which does not exist in any TF release — there is no TF API that
    # draws text onto an image.  Until a real renderer (e.g.
    # PIL.ImageDraw) is wired in, return the resized tensor unchanged so
    # callers keep receiving a (1, 64, 64) image.
    return tf.squeeze(image, axis=-1)
def create_model(self, batch_size):
    """Build and compile the 6-conv-layer CNN discriminator.

    Parameters
    ----------
    batch_size : int
        Unused here (batching is handled by ``fit``); kept so the
        signature stays backward compatible.

    Returns
    -------
    tf.keras.Model
        A compiled Sequential model mapping a (64, 64, 1) image to a
        single sigmoid score.
    """
    # BUG FIX: the original mixed Sequential and functional-API styles,
    # calling each layer with undefined attributes (self.input,
    # self.conv1, ..., self.pool5, self.loss) and passing an invalid
    # `input_shape` kwarg to Sequential itself.  In a Sequential model
    # the layers are simply listed; Keras chains them.
    # BUG FIX: the input shape (None, 64, 64) had no channel axis;
    # Conv2D needs (height, width, channels).
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(64, 64, 1)),
        tf.keras.layers.Conv2D(32, (8, 8), activation='relu', padding='same', name='conv1'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(32, (8, 8), activation='relu', padding='same', name='conv2'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(64, (8, 8), activation='relu', padding='same', name='conv3'),
        tf.keras.layers.BatchNormalization(),
        # NOTE: conv4 intentionally has no pooling step, matching the
        # original layer sequence.
        tf.keras.layers.Conv2D(64, (8, 8), activation='relu', padding='same', name='conv4'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(64, (8, 8), activation='relu', padding='same', name='conv5'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Conv2D(64, (8, 8), activation='relu', padding='same', name='conv6'),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.MaxPooling2D((2, 2)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid'),
    ])
    model.compile(
        # NOTE(review): Adam's first positional argument is the learning
        # rate, but the original passed self.adam_beta1 — confirm whether
        # this attribute actually holds the LR or beta_1.
        optimizer=tf.keras.optimizers.Adam(self.adam_beta1),
        # BUG FIX: the original passed loss=self.loss (used elsewhere as
        # a callable on image lists) and SparseCategoricalAccuracy, which
        # is meaningless for a single sigmoid unit.  Binary cross-entropy
        # matches the 1-unit sigmoid head.
        loss='binary_crossentropy',
        metrics=['accuracy'],
    )
    return model
def train(self, batch_size):
    """Run the training loop: up to 10,000 single-epoch fit steps.

    Logs image and loss summaries every 100 steps and regenerates the
    preview images whenever the reported loss goes negative.

    Parameters
    ----------
    batch_size : int
        Mini-batch size forwarded to ``create_model`` and ``model.fit``.

    Rewrite notes (defects removed from the original):
    - ``model.fit`` returns a ``History`` object, not an ``(img, loss)``
      pair as the original unpacked.
    - Bare ``valid_img_list`` / ``valid_batch_img_list`` names raised
      NameError (the data lives on ``self``).
    - Scalars were logged through ``tf.summary.image``; scalars belong
      in ``tf.summary.scalar``.
    - Invalid syntax (``for_img_list in ...:``), an unterminated
      multi-line string, ``time.sleep(time.time + time.cancel)``
      (``time.cancel`` does not exist and ``time.time`` was never
      called), an undefined ``log_file``, numeric comparison of the
      string ``'aa'``, and ~200 lines of duplicated dead
      self-assignments were all dropped.
    """
    model = self.create_model(batch_size)

    # NOTE(review): the original initialized these as empty lists and
    # never populated them before fitting; the only visible data source
    # is the validation set on self — confirm where the real training
    # data should come from.
    train_img_list = list(self.valid_img_list)
    train_batch_img_list = list(self.valid_batch_img_list)

    for step in range(1, 10001):
        history = model.fit(
            train_img_list,
            train_batch_img_list,
            batch_size=batch_size,
            epochs=1,
            verbose=0,
            validation_data=(self.valid_img_list, self.valid_batch_img_list),
        )
        # Last recorded training loss for this single-epoch step.
        loss = history.history['loss'][-1]

        if step % 100 == 0:
            tf.summary.image('train_img', train_img_list[0], step=step)
            tf.summary.image('train_batch_img', train_batch_img_list[0], step=step)
            tf.summary.image('valid_img', self.valid_img_list[0], step=step)
            tf.summary.image('valid_batch_img', self.valid_batch_img_list[0], step=step)
            tf.summary.scalar('train_loss', loss, step=step)
            print('step:', step, 'loss:', loss)
            sys.stdout.flush()

        # Kept from the original: a negative loss triggers regeneration
        # of the preview images.  NOTE(review): binary cross-entropy is
        # never negative, so this path likely signals a data problem —
        # confirm the intent.
        if loss < 0:
            self.word2image(self.valid_img_list[0])
            self.word2image(self.valid_batch_img_list[1])
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement