import kagglehub
from kagglehub import KaggleDatasetAdapter
import pandas as pd
import json
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments
from sklearn.metrics import f1_score
import numpy as np
from datasets import Dataset

# Set random seed for reproducibility
torch.manual_seed(42)
np.random.seed(42)
# 1. Load Datasets
train_file_path = "train.jsonl"      # Update if different (e.g., for Subtask A)
test_file_path = "test.jsonl"        # Update if different
label_map_file = "label_to_id.json"  # Update if different

# Load training and test data
train_df = kagglehub.load_dataset(
    KaggleDatasetAdapter.PANDAS,
    "daniilor/semeval-2026-task13",
    train_file_path,
)
test_df = kagglehub.load_dataset(
    KaggleDatasetAdapter.PANDAS,
    "daniilor/semeval-2026-task13",
    test_file_path,
)

# Load label mappings
label_to_id_path = kagglehub.dataset_download("daniilor/semeval-2026-task13", label_map_file)
with open(label_to_id_path, 'r', encoding='utf-8') as f:
    label_to_id = json.load(f)
id_to_label = {v: k for k, v in label_to_id.items()}
# Verify data
print("Training Data (first 5 records):")
print(train_df.head())
print("\nTest Data (first 5 records):")
print(test_df.head())
print("\nLabel Mappings:", label_to_id)
# 2. Preprocess Data
# Initialize GraphCodeBERT tokenizer
tokenizer = AutoTokenizer.from_pretrained("microsoft/graphcodebert-base")

def tokenize_function(examples):
    # Pad/truncate every example to 512 tokens, GraphCodeBERT's maximum input length
    return tokenizer(examples['code'], padding="max_length", truncation=True, max_length=512)
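
# Quick illustration (assumes the 'code' column used above): tokenize one
# snippet and confirm the fixed-length output the model will receive.
sample = tokenize_function({'code': train_df['code'].iloc[0]})
print("Sample input_ids length:", len(sample['input_ids']))  # 512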
# Convert to Hugging Face Datasets
train_dataset = Dataset.from_pandas(train_df[['code', 'label']])
test_dataset = Dataset.from_pandas(test_df[['id', 'code']])

# Tokenize datasets
train_dataset = train_dataset.map(tokenize_function, batched=True)
test_dataset = test_dataset.map(tokenize_function, batched=True)

# Set format for PyTorch. Only tensor columns are exposed here; the test ids
# are re-attached from test_df when the submission is built, since non-numeric
# ids cannot be converted to tensors.
train_dataset.set_format('torch', columns=['input_ids', 'attention_mask', 'label'])
test_dataset.set_format('torch', columns=['input_ids', 'attention_mask'])

# Hold out 10% of the training data for validation
split = train_dataset.train_test_split(test_size=0.1, seed=42)
train_dataset, val_dataset = split['train'], split['test']
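
# If the classes are imbalanced, a stratified split keeps the label ratio
# identical in both halves (a sketch, assuming 'label' is first cast to a
# ClassLabel feature; left commented out to preserve the split above):
# train_dataset = train_dataset.class_encode_column('label')
# split = train_dataset.train_test_split(test_size=0.1, seed=42, stratify_by_column='label')
# train_dataset, val_dataset = split['train'], split['test']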
# 3. Define Model
model = AutoModelForSequenceClassification.from_pretrained(
    "microsoft/graphcodebert-base",
    num_labels=2,  # binary classification for Subtask A
)
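
# For a multi-class subtask, the label map loaded above can size the
# classification head instead of the hard-coded 2 (a sketch, not needed for
# binary Subtask A):
# model = AutoModelForSequenceClassification.from_pretrained(
#     "microsoft/graphcodebert-base",
#     num_labels=len(label_to_id),
#     id2label=id_to_label,
#     label2id=label_to_id,
# )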
# 4. Define Metrics
def compute_metrics(pred):
    # pred.predictions holds raw logits; argmax picks the predicted class
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    macro_f1 = f1_score(labels, preds, average='macro')
    return {"macro_f1": macro_f1}
# 5. Set Training Arguments
training_args = TrainingArguments(
    output_dir="./graphcodebert_finetuned",
    evaluation_strategy="epoch",  # renamed to eval_strategy in recent transformers releases
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=3,
    weight_decay=0.01,
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="macro_f1",
    seed=42,
)
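
# Optional tweak (not in the original script; verify against your transformers
# version): on a CUDA GPU, adding the line below inside TrainingArguments
# enables mixed precision, roughly halving memory use:
#     fp16=torch.cuda.is_available(),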
# 6. Initialize Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=val_dataset,
    compute_metrics=compute_metrics,
)

# 7. Train Model
trainer.train()
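
# Report the best checkpoint's validation score; with load_best_model_at_end=True
# the weights in memory are from the best epoch, not necessarily the last one.
print(trainer.evaluate())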
# 8. Generate Predictions
predictions = trainer.predict(test_dataset)
pred_labels = np.argmax(predictions.predictions, axis=1)

# 9. Prepare Submission File
# ids come from the original test frame: the torch-formatted dataset only
# exposes its tensor columns, and map() preserves row order.
submission_df = pd.DataFrame({
    'id': test_df['id'],
    'label': pred_labels,
})
submission_df.to_csv('submission_graphcodebert.csv', index=False)
print("Submission file saved as 'submission_graphcodebert.csv'")