from transformers import TrainingArguments, Trainer
from datasets import load_dataset
import evaluate
import numpy as np
from modeling_octagon import OctagonForSequenceClassification, OctagonConfig
from tokenization_octagon import OctagonTokenizer
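
# Load the IMDB movie-review dataset (binary sentiment labels)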
dataset = load_dataset("imdb")
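
# Work with small shuffled subsets so the example runs quickly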
train_dataset = dataset["train"].shuffle(seed=42).select(range(1000))
eval_dataset = dataset["test"].shuffle(seed=42).select(range(200))
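
# Train the custom tokenizer on the training texts and save it to disk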
tokenizer = OctagonTokenizer.train_tokenizer(
    texts=train_dataset["text"],
    vocab_size=30522,
    save_path="octagon-tokenizer.json",
)
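

# Pad and truncate each review to a fixed length so every batch has the same shape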
def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True)


tokenized_train = train_dataset.map(tokenize_function, batched=True)
tokenized_eval = eval_dataset.map(tokenize_function, batched=True)
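
# A deliberately small configuration: 4 layers, 4 heads, 128-dim hidden states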
config = OctagonConfig(
    vocab_size=30522,
    hidden_size=128,
    num_hidden_layers=4,
    num_attention_heads=4,
    intermediate_size=512,
    num_labels=2,
)
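
# Instantiate the classifier with randomly initialized weights (no pretraining)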
model = OctagonForSequenceClassification(config)
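
# Report accuracy on the evaluation set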
metric = evaluate.load("accuracy")


def compute_metrics(eval_pred):
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return metric.compute(predictions=predictions, references=labels)
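
# Evaluate and checkpoint once per epoch; the best checkpoint (by eval loss)
# is reloaded when training finishes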
training_args = TrainingArguments(
    output_dir="octagon_model",
    eval_strategy="epoch",  # named evaluation_strategy in transformers < 4.41
    save_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=3,
    weight_decay=0.01,
    load_best_model_at_end=True,
)
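
# The Trainer handles the training loop, evaluation, and checkpointing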
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_train,
    eval_dataset=tokenized_eval,
    compute_metrics=compute_metrics,
)
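
# Train for three epochs, evaluating after each one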
trainer.train()
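
# Persist the trained model and tokenizer for later reuse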
model.save_pretrained("octagon_model")
tokenizer.save_pretrained("octagon_model")
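

# Quick sanity check (a sketch): reload the saved artifacts and classify one
# review. This assumes OctagonTokenizer and OctagonForSequenceClassification
# follow the Hugging Face from_pretrained() convention, that the tokenizer
# accepts return_tensors="pt", and that the model output exposes .logits;
# adapt if the custom classes differ.
import torch

loaded_tokenizer = OctagonTokenizer.from_pretrained("octagon_model")
loaded_model = OctagonForSequenceClassification.from_pretrained("octagon_model")
loaded_model.eval()

inputs = loaded_tokenizer(
    "A surprisingly heartfelt film with a terrific cast.",
    padding="max_length",
    truncation=True,
    return_tensors="pt",
)
with torch.no_grad():
    logits = loaded_model(**inputs).logits
print("Predicted label:", logits.argmax(dim=-1).item())  # IMDB: 0 = negative, 1 = positive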