# Source extraction metadata: file size 1,518 bytes, commit 16220d7.
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.optim as optim
# Load a pretrained backbone to fine-tune.
# NOTE(review): the original called `torchvision.models.vqa_resnet_finetune`,
# which does not exist in torchvision; resnet50 is the closest standard
# backbone whose `fc` head matches the replacement below — confirm the
# intended base model against the project docs.
base_model = torchvision.models.resnet50(pretrained=True)

# Load and preprocess the OK-VQA dataset.
# (Dataset loading/preprocessing is only sketched here.)
# NOTE(review): OKVQADataset is assumed to be defined elsewhere in the
# project and to expose a `classes` attribute — verify.
train_dataset = OKVQADataset(
    'train_data.json',
    transform=transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ]),
)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)

# Replace the classification head with a new layer sized to the dataset's
# answer vocabulary (the example uses the dataset's class count).
num_classes = len(train_dataset.classes)
base_model.fc = nn.Linear(base_model.fc.in_features, num_classes)

# Move the model to the GPU when one is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
base_model = base_model.to(device)

# Loss and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(base_model.parameters(), lr=0.001)

# Fine-tuning loop.
num_epochs = 10
for epoch in range(num_epochs):
    base_model.train()  # ensure dropout/batch-norm run in training mode
    running_loss = 0.0
    num_batches = 0
    for inputs, labels in train_loader:
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = base_model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        num_batches += 1
    # Report the epoch's mean loss rather than only the last batch's,
    # which was a misleading training signal in the original.
    avg_loss = running_loss / max(num_batches, 1)
    print(f'Epoch {epoch+1}/{num_epochs}, Loss: {avg_loss:.4f}')

# Persist only the fine-tuned weights (state dict), not the full module.
torch.save(base_model.state_dict(), 'git-vqa-finetuned-on-ok-vqa.pth')
|