import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Input, Dense, LeakyReLU, Activation
from tensorflow.keras.models import Model, Sequential

# Download the credit-card-fraud dataset (uncomment in Colab):
# !gdown --id 1-o-1k5OWe1yhsuJDnr8bhHTPz9SwF5T6
# Keep only legitimate (non-fraud) transactions: the GAN learns the
# distribution of normal samples.
df = pd.read_csv('creditcard.csv')
df = df[df['Class'] == 0].reset_index(drop=True)

# Drop the label and timestamp, keep the 28 PCA components V1..V28.
df_raw = df.drop(['Class', 'Time'], axis=1)
df_raw = df_raw.iloc[:, :28].values
del df

feat_dim = df_raw.shape[1]
batch_size = 700

df_raw_v = np.reshape(df_raw.astype(np.float32), (-1, feat_dim))
dataset = tf.data.Dataset.from_tensor_slices(df_raw_v)
dataset = dataset.shuffle(buffer_size=512).batch(batch_size)
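# NOTE (assumption, not part of the original paste): the generator below
# ends in tanh, so its outputs live in [-1, 1], while V1..V28 are unbounded
# PCA components. A minimal min-max rescaling sketch, applied to df_raw_v
# before the from_tensor_slices call above, would be:
# lo, hi = df_raw_v.min(axis=0), df_raw_v.max(axis=0)
# df_raw_v = 2.0 * (df_raw_v - lo) / (hi - lo) - 1.0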
class MyDiscriminator(Model):
    def __init__(self, d_hidden_dim=50, last_activation='tanh', **kwargs):
        super().__init__(**kwargs)  # handle standard args (e.g. name)
        self.hidden1 = Dense(d_hidden_dim * 2, name='discriminator_h1', activation=LeakyReLU(0.3))
        self.hidden2 = Dense(d_hidden_dim, name='discriminator_h2', activation=LeakyReLU(0.3))
        self.d_output = Dense(1, name='discriminator_y', activation=last_activation)

    def call(self, inputs, with_feature=False):
        hidden1 = self.hidden1(inputs)
        d_latent_feat = self.hidden2(hidden1)
        d_output = self.d_output(d_latent_feat)
        # Optionally expose the penultimate features (useful for
        # feature matching or anomaly scoring).
        if with_feature:
            return d_output, d_latent_feat
        return d_output
class MyGenerator(Model):
    def __init__(self, output_dim, d_hidden_dim=100, last_activation='tanh', **kwargs):
        super().__init__(**kwargs)  # handle standard args (e.g. name)
        self.hidden1 = Dense(d_hidden_dim, name='generator_h1', activation=LeakyReLU(0.3))
        self.g_output = Dense(output_dim, name='generator_x_flat', activation=last_activation)

    def call(self, inputs, with_feature=False):
        g_latent_feat = self.hidden1(inputs)
        g_output = self.g_output(g_latent_feat)
        if with_feature:
            return g_output, g_latent_feat
        return g_output
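# Quick shape check for the subclassed models (a sketch, not in the
# original paste): feed random noise through generator + discriminator.
# g = MyGenerator(output_dim=28)
# d = MyDiscriminator()
# z = tf.random.normal((4, 10))
# print(d(g(z)).shape)  # -> (4, 1)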
# Hyper-parameters
d_hidden_dim = 50        # discriminator hidden width
g_latent_dim = 100       # generator hidden width
g_output_dim = feat_dim  # generator output size (= number of features)
g_noise_dim = 10         # latent noise vector size

# Subclassed variants wrapped in Sequential (Input shape must be a tuple).
my_disc = Sequential(
    [
        Input(shape=(feat_dim,)),
        MyDiscriminator(),
    ], name='discriminator_def'
)
my_gen = Sequential(
    [
        Input(shape=(g_noise_dim,)),
        MyGenerator(feat_dim),
    ], name='generator_def'
)
my_disc.summary()
discriminator = Sequential(
    [
        # Input layer
        Input(shape=(g_output_dim,)),
        # Layer 1
        Dense(d_hidden_dim * 2, name="discriminator_h1"),
        LeakyReLU(0.2),
        # Layer 2
        Dense(d_hidden_dim, name="discriminator_h2"),
        LeakyReLU(0.2),
        # Output layer: the losses below use sigmoid_cross_entropy_with_logits,
        # so this tanh squashes the "logits" into [-1, 1]; a linear output
        # is the more common choice.
        Dense(1, name="discriminator_y"),
        Activation("tanh"),
    ],
    name="discriminator",
)
generator = keras.Sequential(
    [
        # Input layer
        Input(shape=(g_noise_dim,)),
        # Layer 1
        Dense(g_latent_dim, name="generator_h1"),
        LeakyReLU(0.2),
        # Output layer
        Dense(g_output_dim, name="generator_x_flat"),
        Activation('tanh'),
    ],
    name="generator",
)
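# Inspect the functional-style models (sketch, not in the original paste):
# discriminator.summary()
# generator.summary()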
class GAN(keras.Model):
    def __init__(self, discriminator, generator, latent_dim):
        super().__init__()
        self.discriminator = discriminator
        self.generator = generator
        self.latent_dim = latent_dim

    def compile(self, d_optimizer, g_optimizer, d_loss_fn, g_loss_fn):
        super().compile()
        self.d_optimizer = d_optimizer
        self.g_optimizer = g_optimizer
        self.d_loss_fn = d_loss_fn
        self.g_loss_fn = g_loss_fn

    def train_step(self, real_samples):
        # Unpack (x, y) tuples coming from a tf.data.Dataset.
        if isinstance(real_samples, tuple):
            real_samples = real_samples[0]

        # Sample random points in the latent space.
        batch_size = tf.shape(real_samples)[0]
        random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))

        # Decode them into fake samples. This happens outside the tape
        # because the generator is not updated in the discriminator step.
        generated_samples = self.generator(random_latent_vectors)

        # Train the discriminator.
        with tf.GradientTape() as tape:
            predictions_f = self.discriminator(generated_samples)
            predictions_r = self.discriminator(real_samples)
            d_loss = self.d_loss_fn(predictions_r, predictions_f)
        grads = tape.gradient(d_loss, self.discriminator.trainable_weights)
        self.d_optimizer.apply_gradients(zip(grads, self.discriminator.trainable_weights))

        # Sample fresh latent points for the generator step.
        random_latent_vectors = tf.random.normal(shape=(batch_size, self.latent_dim))

        # Train the generator (note that we should *not* update the
        # weights of the discriminator!).
        with tf.GradientTape() as tape:
            predictions = self.discriminator(self.generator(random_latent_vectors))
            g_loss = self.g_loss_fn(predictions)
        grads = tape.gradient(g_loss, self.generator.trainable_weights)
        self.g_optimizer.apply_gradients(zip(grads, self.generator.trainable_weights))

        return {"d_loss": d_loss, "g_loss": g_loss}
# Standard (non-saturating) GAN losses; at the discriminator's optimum the
# minimax objective corresponds to the Jensen–Shannon divergence.
def discriminator_loss(d_real, d_fake, metrics='JSD'):
    if metrics in ('JSD', 'jsd'):
        real_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_real),
                                                    logits=d_real))
        fake_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_fake),
                                                    logits=d_fake))
        return real_loss + fake_loss
    raise ValueError(f"Unsupported metric: {metrics}")


def generator_loss(d_fake, metrics='JSD'):
    if metrics in ('JSD', 'jsd'):
        return tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_fake),
                                                    logits=d_fake))
    raise ValueError(f"Unsupported metric: {metrics}")
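# Numeric sanity check for the losses (sketch, not in the original paste):
# a confident discriminator (large positive logit on real, large negative
# on fake) should give d_loss near 0 and a large g_loss.
# d_real = tf.constant([[10.0]]); d_fake = tf.constant([[-10.0]])
# print(discriminator_loss(d_real, d_fake).numpy())  # ~0.0001
# print(generator_loss(d_fake).numpy())              # ~10.0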
# Train on the functional-style models (swap in my_disc / my_gen to use
# the subclassed variants instead).
gan = GAN(discriminator=discriminator, generator=generator, latent_dim=g_noise_dim)
# gan = GAN(discriminator=my_disc, generator=my_gen, latent_dim=g_noise_dim)
gan.compile(d_optimizer=keras.optimizers.Adam(), g_optimizer=keras.optimizers.Adam(),
            d_loss_fn=discriminator_loss, g_loss_fn=generator_loss)
train_result = gan.fit(dataset, epochs=20, verbose=1)
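# After training, synthetic "normal" transactions can be drawn from the
# generator (usage sketch, assuming the scaling caveat above is handled):
# noise = tf.random.normal((1000, g_noise_dim))
# synthetic = gan.generator(noise, training=False).numpy()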