# https://huggingface.co/transformers/v4.4.2/training.html#fine-tuning-in-native-tensorflow-2
import tensorflow as tf
import tensorflow_datasets as tfds
from transformers import TFBertForSequenceClassification, BertTokenizer, glue_convert_examples_to_features

# Enable memory growth so TensorFlow does not grab all GPU memory up front.
# This must run before any GPU has been initialized.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        print(e)

# The tokenizer and model must come from the same checkpoint; the original
# paste mixed 'bert-base-uncased' with 'bert-base-cased'. Use from_pretrained
# so the pretrained weights are actually loaded for fine-tuning (building the
# model from an AutoConfig alone would initialize it randomly).
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
model = TFBertForSequenceClassification.from_pretrained('bert-base-cased')

n_epochs = 5
data = tfds.load('glue/mrpc')

# Convert the raw tfds examples into tokenized features for the MRPC task.
train_dataset = glue_convert_examples_to_features(data['train'], tokenizer, max_length=192, task='mrpc')
train_dataset = train_dataset.shuffle(100).batch(32).repeat(n_epochs)
# The validation set needs neither shuffling nor repeating; repeating it
# without passing validation_steps to fit() would make Keras raise an error.
valid_dataset = glue_convert_examples_to_features(data['validation'], tokenizer, max_length=192, task='mrpc')
valid_dataset = valid_dataset.batch(32)

optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5)
# The model outputs raw logits, hence from_logits=True.
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer=optimizer, loss=loss)
# MRPC has 3,668 training examples; 3668 / 32 ≈ 115 steps per epoch.
model.fit(train_dataset, validation_data=valid_dataset, epochs=n_epochs, steps_per_epoch=115)
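
# After training, the fine-tuned model can classify new sentence pairs.
# A minimal inference sketch; the two example sentences below are made up
# for illustration (MRPC label 1 = paraphrase, 0 = not a paraphrase):
inputs = tokenizer(
    "The company posted record profits this quarter.",
    "Quarterly profits at the firm hit an all-time high.",
    max_length=192, padding='max_length', truncation=True, return_tensors='tf')
logits = model(inputs).logits
pred = int(tf.argmax(logits, axis=-1)[0])
print('paraphrase' if pred == 1 else 'not a paraphrase')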