import os
import torch
import glob
import gc
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
    LlamaConfig,
    AutoConfig
)
from peft import LoraConfig, TaskType, get_peft_model, prepare_model_for_kbit_training
from datasets import Dataset
from huggingface_hub import snapshot_download
from tqdm import tqdm
import gradio as gr
import math
from accelerate import Accelerator
import subprocess
import sys
import json
import shutil
import traceback
# --- Configuration ---
YOUR_HF_USERNAME = "Twelve2five"
MODEL_REPO_NAME = "llama-3-8b-rvq-resized"
DATASET_REPO_NAME = "podcast-dialogue-rvq-pairs-3items"
hf_model_repo_id = f"{YOUR_HF_USERNAME}/{MODEL_REPO_NAME}"
hf_dataset_repo_id = f"{YOUR_HF_USERNAME}/{DATASET_REPO_NAME}"
# Output directories
OUTPUT_TRAINING_DIR = "./llama3-8b-rvq-qlora-finetuned-run"
LOGGING_DIR = "./llama3-8b-rvq-qlora-logs-run"
local_download_path = "./downloaded_dataset_files"
# Training parameters
NUM_EPOCHS = 1
BATCH_SIZE_PER_DEVICE = 1
GRAD_ACCUMULATION_STEPS = 64
LEARNING_RATE = 1e-4
WEIGHT_DECAY = 0.01
WARMUP_RATIO = 0.03
LR_SCHEDULER = "cosine"
OPTIMIZER = "paged_adamw_8bit"
MAX_SEQ_LENGTH = 256
MICRO_BATCH_SIZE = 1
# Multi-GPU configuration
accelerator = Accelerator()
# Configure environment for multi-GPU
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:32"
# Print GPU information
print(f"Available GPUs: {torch.cuda.device_count()}")
for i in range(torch.cuda.device_count()):
print(f"GPU {i}: {torch.cuda.get_device_name(i)} with {torch.cuda.get_device_properties(i).total_memory / 1e9:.2f} GB")
def seq2seq_causal_collator(features):
"""
Collator that concatenates context (input_ids) and target (labels)
for Causal LM sequence-to-sequence training.
Masks the loss for the context part of the sequence.
Pads sequences to the maximum length in the batch.
"""
batch = {}
concatenated_input_ids = []
concatenated_labels = []
max_len = 0
# --- First pass: Concatenate, create masked labels, find max length ---
    for feature in features:
        # Accept tensors (from the dataset transform) or raw Python lists
        # (straight from Dataset.from_dict) and coerce to long tensors
        input_ids = torch.as_tensor(feature['input_ids'], dtype=torch.long)
        labels = torch.as_tensor(feature['labels'], dtype=torch.long)
        # Ensure tensors are 1D (handle potential extra dims if any)
        if input_ids.dim() > 1: input_ids = input_ids.squeeze()
        if labels.dim() > 1: labels = labels.squeeze()
context_len = input_ids.shape[0]
target_len = labels.shape[0]
# Concatenate context and target for input
combined_ids = torch.cat([input_ids, labels], dim=0)
concatenated_input_ids.append(combined_ids)
# Create labels: -100 for context, actual labels for target
masked_labels = torch.cat([
torch.full((context_len,), -100, dtype=torch.long, device=input_ids.device),
labels
], dim=0)
concatenated_labels.append(masked_labels)
# Track max length for padding
if combined_ids.shape[0] > max_len:
max_len = combined_ids.shape[0]
# --- Second pass: Pad to max length ---
padded_input_ids = []
padded_labels = []
input_pad_token_id = 0
label_pad_token_id = -100
for i in range(len(features)):
ids = concatenated_input_ids[i]
lbls = concatenated_labels[i]
padding_len = max_len - ids.shape[0]
# Pad on the right side
padded_input_ids.append(torch.nn.functional.pad(
ids, (0, padding_len), value=input_pad_token_id
))
padded_labels.append(torch.nn.functional.pad(
lbls, (0, padding_len), value=label_pad_token_id
))
# --- Stack and create final batch ---
batch['input_ids'] = torch.stack(padded_input_ids)
batch['labels'] = torch.stack(padded_labels)
    # Create attention mask (1 for real tokens, 0 for padding). Note that this
    # also zeroes any *real* token whose id equals the pad id (0), so pad id 0
    # must not collide with a meaningful RVQ code id.
    batch['attention_mask'] = batch['input_ids'].ne(input_pad_token_id).long()
return batch
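# Illustrative example (comments only, not executed): with pad id 0, a toy
# batch of two context/target pairs collates to
#
#   seq2seq_causal_collator([
#       {'input_ids': torch.tensor([5, 6]), 'labels': torch.tensor([7, 8, 9])},
#       {'input_ids': torch.tensor([5]),    'labels': torch.tensor([7])},
#   ])
#   # input_ids      -> [[5, 6, 7, 8, 9], [5, 7, 0, 0, 0]]
#   # labels         -> [[-100, -100, 7, 8, 9], [-100, 7, -100, -100, -100]]
#   # attention_mask -> [[1, 1, 1, 1, 1],  [1, 1, 0, 0, 0]]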
def prepare_for_dataset(batch):
output = {'input_ids': [], 'labels': []}
for item in batch:
output['input_ids'].append(item['input_ids'].cpu().tolist())
output['labels'].append(item['labels'].cpu().tolist())
return output
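# The plain Python lists produced here are what Dataset.from_dict can
# serialize via Arrow; load_dataset() below restores tensors lazily with
# set_transform.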
def load_model(hf_token=None, progress=lambda *args, **kwargs: None):
    log = []
    print(f"Loading base model architecture from: {hf_model_repo_id}")
# Get information about GPU with most free memory
gpu_id = 0 # Default to first GPU
max_free_memory = 0
for i in range(torch.cuda.device_count()):
free_memory = torch.cuda.get_device_properties(i).total_memory - torch.cuda.memory_allocated(i)
if free_memory > max_free_memory:
max_free_memory = free_memory
gpu_id = i
print(f"Loading model on GPU {gpu_id} with {max_free_memory / 1e9:.2f}GB free memory")
# Configure quantization
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16
)
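    # Rough sizing (assumption, not a measurement): NF4 stores weights at about
    # 0.5 bytes/param, so an 8B-parameter model needs on the order of 4 GB for
    # weights, before quantization constants, activations and optimizer state.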
# Load the model
try:
# First update transformers to make sure we have latest version
subprocess.check_call([sys.executable, "-m", "pip", "install", "--upgrade", "transformers"])
# Now try loading with explicit config class to avoid auto-detection issues
from transformers import LlamaConfig
# Load config first
config = LlamaConfig.from_pretrained(
hf_model_repo_id,
trust_remote_code=True
)
# Then load model with explicit config
model = AutoModelForCausalLM.from_pretrained(
hf_model_repo_id,
config=config,
quantization_config=bnb_config,
device_map="auto",
trust_remote_code=True
)
log.append(f"Loaded model vocab size: {model.config.vocab_size}")
log.append(f"Input embedding shape: {model.get_input_embeddings().weight.shape}")
except Exception as e:
error_msg = f"Error loading model from Hub: {e}"
log.append(error_msg)
# Try with a fallback method
try:
log.append("Attempting alternative loading method...")
# Try loading without auto detection
model = AutoModelForCausalLM.from_pretrained(
hf_model_repo_id,
quantization_config=bnb_config,
device_map="auto",
trust_remote_code=True,
torch_dtype=torch.bfloat16,
# Add these to help with the loading
revision="main",
low_cpu_mem_usage=True,
)
log.append("Alternative loading successful!")
log.append(f"Loaded model vocab size: {model.config.vocab_size}")
        except Exception as e2:
            log.append(f"Alternative loading also failed: {e2}")
            print("\n".join(log))
            return None, None
# --- Load Tokenizer (prioritizing Llama 3.2 1B) ---
progress(0.3, desc="Loading tokenizer...")
# Set up token for authentication
token_param = {"token": hf_token} if hf_token and hf_token.strip() else {}
if token_param:
log.append("Using provided Hugging Face token for authentication")
else:
log.append("No token provided, using Space's default authentication")
# Try to load a compatible tokenizer
try:
# First try the actual Llama 3.2 1B tokenizer
tokenizer_repo = "meta-llama/Llama-3.2-1B" # The official 1B model
log.append(f"Attempting to load tokenizer from {tokenizer_repo}...")
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_repo,
padding_side="right",
use_fast=True,
**token_param # Pass token if provided
)
log.append(f"Successfully loaded tokenizer from {tokenizer_repo}")
except Exception as e1:
log.append(f"Couldn't load {tokenizer_repo} tokenizer: {e1}")
# Try the model repo directly (in case it has a tokenizer)
try:
tokenizer = AutoTokenizer.from_pretrained(
hf_model_repo_id, # The RVQ model repo
padding_side="right",
use_fast=True,
**token_param # Pass token if provided
)
log.append(f"Loaded tokenizer from the model repo: {hf_model_repo_id}")
except Exception as e2:
log.append(f"Couldn't load model repo tokenizer: {e2}")
# Continue with our fallbacks (public models don't need token)
try:
# Try TinyLlama (public)
tokenizer = AutoTokenizer.from_pretrained(
"TinyLlama/TinyLlama-1.1B-Chat-v1.0",
padding_side="right",
use_fast=True
)
log.append("Loaded TinyLlama tokenizer as fallback")
except Exception as e3:
log.append(f"Couldn't load TinyLlama tokenizer: {e3}")
# Last resort - other public models
try:
tokenizer = AutoTokenizer.from_pretrained(
"microsoft/phi-2", # Public model
padding_side="right"
)
log.append("Loaded Phi-2 tokenizer as last resort")
                except Exception as e4:
                    error_msg = f"Failed to load any compatible tokenizer after multiple attempts: {e4}"
                    log.append(error_msg)
                    print("\n".join(log))
                    return None, None
# Set pad token if not already set
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token if tokenizer.eos_token is not None else "<pad>"
log.append("Set pad_token to eos_token or <pad>")
log.append(f"Tokenizer loaded with vocab size: {len(tokenizer)}")
log.append(f"Model vocab size: {model.config.vocab_size}")
log.append(f"Input embedding shape: {model.get_input_embeddings().weight.shape}")
# Prepare model for k-bit training
model = prepare_model_for_kbit_training(model)
# Define LoRA configuration - adjusted for 1B model
lora_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
r=8, # Smaller rank for 1B model (vs 16 for larger models)
lora_alpha=16, # Adjusted alpha (vs 32 for larger models)
lora_dropout=0.05,
bias="none",
target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
)
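    # Back-of-envelope (assumption): LoRA adds factors A (r x d_in) and
    # B (d_out x r) per targeted linear layer, i.e. r * (d_in + d_out)
    # trainable params each, so r=8 across these seven projections keeps the
    # trainable fraction well under 1% of the base model.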
# Apply LoRA to model
progress(0.4, desc="Applying LoRA to model...")
model_to_train = get_peft_model(model, lora_config)
log.append("LoRA applied to model")
log.append(f"LoRA rank: 8, alpha: 16 (optimized for 1B model)")
model_to_train.print_trainable_parameters()
return model, tokenizer
def load_dataset():
# --- Download the dataset repository files ---
try:
os.makedirs(local_download_path, exist_ok=True)
downloaded_repo_root = snapshot_download(
repo_id=hf_dataset_repo_id,
repo_type="dataset",
local_dir=local_download_path,
local_dir_use_symlinks=False
)
print(f"Dataset repository content downloaded to: {downloaded_repo_root}")
except Exception as e:
print(f"Error downloading dataset: {e}")
return None
# --- Load .pt files into a Hugging Face Dataset object ---
pairs_dir = os.path.join(downloaded_repo_root, "final_rvq_pairs")
all_pair_files = glob.glob(os.path.join(pairs_dir, "*_rvq_pairs.pt"))
if not all_pair_files:
all_pair_files = glob.glob(os.path.join(downloaded_repo_root, "*_rvq_pairs.pt"))
if not all_pair_files:
print("No RVQ pair files found!")
return None
print(f"Found {len(all_pair_files)} RVQ pair files.")
# Load data from .pt files into memory
all_data_pairs = []
for file_path in tqdm(all_pair_files, desc="Loading pair files"):
try:
episode_pairs = torch.load(file_path, map_location='cpu')
all_data_pairs.extend(episode_pairs)
except Exception as e:
print(f"Warning: Could not load file {file_path}: {e}")
if not all_data_pairs:
return None
print(f"Loaded {len(all_data_pairs)} training pairs.")
# Convert to Hugging Face Dataset
chunk_size = 1000
processed_data = {'input_ids': [], 'labels': []}
for i in tqdm(range(0, len(all_data_pairs), chunk_size), desc="Preparing data"):
batch = all_data_pairs[i:i + chunk_size]
prepared_batch = prepare_for_dataset(batch)
processed_data['input_ids'].extend(prepared_batch['input_ids'])
processed_data['labels'].extend(prepared_batch['labels'])
hf_dataset = Dataset.from_dict(processed_data)
# Transform to get tensors back
hf_dataset.set_transform(lambda batch: {
'input_ids': [torch.tensor(ids, dtype=torch.long) for ids in batch['input_ids']],
'labels': [torch.tensor(lbls, dtype=torch.long) for lbls in batch['labels']]
})
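    # set_transform runs lazily on __getitem__, so tensors are materialized
    # only for the examples actually pulled into a batch.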
# Cleanup
del all_data_pairs
del processed_data
gc.collect()
return hf_dataset
# Memory cleaning function
def clean_memory():
gc.collect()
if torch.cuda.is_available():
for i in range(torch.cuda.device_count()):
with torch.cuda.device(f'cuda:{i}'):
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
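# Note: empty_cache() only returns cached blocks to the CUDA driver; it does
# not free live tensors, and reset_peak_memory_stats() just resets the
# bookkeeping behind torch.cuda.max_memory_allocated().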
def train_model(
hf_username,
model_repo_name,
dataset_repo_name,
epochs=1,
batch_size=8,
grad_accum_steps=1,
learning_rate=2e-4,
hf_token=None, # New parameter for token
progress=gr.Progress()
):
progress(0, desc="Setting up environment...")
log = []
# Install sentencepiece if it's not already installed
progress(0.02, desc="Installing required dependencies...")
try:
import sentencepiece
log.append("SentencePiece already installed")
except ImportError:
log.append("Installing SentencePiece...")
try:
subprocess.check_call([sys.executable, "-m", "pip", "install", "sentencepiece"])
log.append("SentencePiece installed successfully")
except Exception as e:
log.append(f"Error installing SentencePiece: {e}")
# Continue anyway, we'll try other tokenizer approaches if this fails
# Clean up any existing model files to save space
if os.path.exists("./model_files"):
try:
shutil.rmtree("./model_files")
except Exception as e:
log.append(f"Warning: Could not remove existing model files: {e}")
if os.path.exists("./downloaded_dataset_files"):
try:
shutil.rmtree("./downloaded_dataset_files")
except Exception as e:
log.append(f"Warning: Could not remove existing dataset files: {e}")
# Print GPU info - using imported torch, not a local variable
if torch.cuda.is_available():
log.append(f"Available GPUs: {torch.cuda.device_count()}")
for i in range(torch.cuda.device_count()):
gpu_name = torch.cuda.get_device_name(i)
gpu_memory = torch.cuda.get_device_properties(i).total_memory / (1024**3)
log.append(f"GPU {i}: {gpu_name} with {gpu_memory:.2f} GB")
# Import required libraries
try:
from datasets import Dataset
from huggingface_hub import snapshot_download
# Don't import torch again, since it's already imported
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers import BitsAndBytesConfig, TrainingArguments, Trainer
from peft import LoraConfig, TaskType, get_peft_model, prepare_model_for_kbit_training
log.append(f"Transformers version: {transformers.__version__}")
log.append(f"PyTorch version: {torch.__version__}")
except ImportError as e:
log.append(f"Error importing libraries: {e}")
return "\n".join(log)
# --- Configuration ---
progress(0.05, desc="Setting up configuration...")
hf_model_repo_id = f"{hf_username}/{model_repo_name}"
hf_dataset_repo_id = f"{hf_username}/{dataset_repo_name}"
log.append(f"Model repo: {hf_model_repo_id}")
log.append(f"Dataset repo: {hf_dataset_repo_id}")
# Check if running on multiple GPUs
n_gpus = torch.cuda.device_count()
log.append(f"Number of GPUs available: {n_gpus}")
# --- DeepSpeed Configuration ---
# Create DeepSpeed config file
progress(0.1, desc="Setting up DeepSpeed configuration...")
# Create a simpler config since we have plenty of memory on A100
ds_config = {
"bf16": {
"enabled": "auto"
},
"zero_optimization": {
"stage": 1, # Lower stage is fine for A100-80GB
"contiguous_gradients": True,
"overlap_comm": True
},
"gradient_accumulation_steps": grad_accum_steps,
"gradient_clipping": 1.0,
"train_batch_size": batch_size * grad_accum_steps * max(1, n_gpus)
}
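    # Example (assumption: single GPU with the UI defaults batch_size=8,
    # grad_accum_steps=1): train_batch_size = 8 * 1 * 1 = 8. DeepSpeed requires
    # train_batch_size == micro_batch_per_gpu * grad_accum_steps * world_size.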
ds_config_path = "ds_config.json"
with open(ds_config_path, "w") as f:
json.dump(ds_config, f, indent=4)
log.append("DeepSpeed configuration created successfully")
# --- Download and Load Model ---
progress(0.15, desc="Downloading model...")
try:
# Download model files
local_model_path = "./model_files"
        snapshot_download(
            repo_id=hf_model_repo_id,
            local_dir=local_model_path,
            token=hf_token if hf_token and hf_token.strip() else None,  # needed for gated/private repos
        )
log.append(f"Model files downloaded to {local_model_path}")
# Check and fix the model config if needed
config_path = os.path.join(local_model_path, "config.json")
if os.path.exists(config_path):
with open(config_path, 'r') as f:
config_data = json.load(f)
# Fix the rope_scaling configuration
if 'rope_scaling' in config_data:
if not isinstance(config_data['rope_scaling'], dict):
config_data['rope_scaling'] = {"type": "linear", "factor": 2.0}
elif 'rope_type' in config_data['rope_scaling']:
# Convert complex rope_scaling to the simple format expected
rope_factor = config_data['rope_scaling'].get('factor', 2.0)
config_data['rope_scaling'] = {"type": "linear", "factor": rope_factor}
# Write the updated config back
with open(config_path, 'w') as f:
json.dump(config_data, f, indent=2)
log.append("Updated model configuration for rope_scaling")
# Create a bnb configuration for loading the model in 4-bit
progress(0.25, desc="Loading model...")
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=torch.bfloat16,
bnb_4bit_use_double_quant=False
)
# Load the model with fixed configuration
model = AutoModelForCausalLM.from_pretrained(
local_model_path,
quantization_config=bnb_config,
device_map="auto",
use_cache=False, # Needed for gradient checkpointing
trust_remote_code=True, # Following reference code
torch_dtype=torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16,
)
# --- Load Tokenizer (prioritizing Llama 3.2 1B) ---
progress(0.3, desc="Loading tokenizer...")
# Set up token for authentication
token_param = {"token": hf_token} if hf_token and hf_token.strip() else {}
if token_param:
log.append("Using provided Hugging Face token for authentication")
else:
log.append("No token provided, using Space's default authentication")
# Try to load a compatible tokenizer
try:
# First try the actual Llama 3.2 1B tokenizer
tokenizer_repo = "meta-llama/Llama-3.2-1B" # The official 1B model
log.append(f"Attempting to load tokenizer from {tokenizer_repo}...")
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_repo,
padding_side="right",
use_fast=True,
**token_param # Pass token if provided
)
log.append(f"Successfully loaded tokenizer from {tokenizer_repo}")
except Exception as e1:
log.append(f"Couldn't load {tokenizer_repo} tokenizer: {e1}")
# Try the model repo directly (in case it has a tokenizer)
try:
tokenizer = AutoTokenizer.from_pretrained(
hf_model_repo_id, # The RVQ model repo
padding_side="right",
use_fast=True,
**token_param # Pass token if provided
)
log.append(f"Loaded tokenizer from the model repo: {hf_model_repo_id}")
except Exception as e2:
log.append(f"Couldn't load model repo tokenizer: {e2}")
# Continue with our fallbacks (public models don't need token)
try:
# Try TinyLlama (public)
tokenizer = AutoTokenizer.from_pretrained(
"TinyLlama/TinyLlama-1.1B-Chat-v1.0",
padding_side="right",
use_fast=True
)
log.append("Loaded TinyLlama tokenizer as fallback")
except Exception as e3:
log.append(f"Couldn't load TinyLlama tokenizer: {e3}")
# Last resort - other public models
try:
tokenizer = AutoTokenizer.from_pretrained(
"microsoft/phi-2", # Public model
padding_side="right"
)
log.append("Loaded Phi-2 tokenizer as last resort")
except Exception as e4:
error_msg = f"Failed to load any compatible tokenizer after multiple attempts: {e4}"
log.append(error_msg)
return "\n".join(log)
# Set pad token if not already set
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token if tokenizer.eos_token is not None else "<pad>"
log.append("Set pad_token to eos_token or <pad>")
log.append(f"Tokenizer loaded with vocab size: {len(tokenizer)}")
log.append(f"Model vocab size: {model.config.vocab_size}")
log.append(f"Input embedding shape: {model.get_input_embeddings().weight.shape}")
# --- QLoRA Preparation ---
progress(0.35, desc="Preparing model for k-bit training...")
model = prepare_model_for_kbit_training(model)
log.append("Model prepared for k-bit training")
# Define LoRA configuration - adjusted for 1B model
lora_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
r=8, # Smaller rank for 1B model (vs 16 for larger models)
lora_alpha=16, # Adjusted alpha (vs 32 for larger models)
lora_dropout=0.05,
bias="none",
target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
)
# Apply LoRA to model
progress(0.4, desc="Applying LoRA to model...")
model_to_train = get_peft_model(model, lora_config)
log.append("LoRA applied to model")
log.append(f"LoRA rank: 8, alpha: 16 (optimized for 1B model)")
model_to_train.print_trainable_parameters()
# Cleanup to free up memory
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
except Exception as e:
error_msg = f"Error preparing model for training: {str(e)}"
log.append(error_msg)
return "\n".join(log)
# --- Download and Load Dataset ---
progress(0.45, desc="Downloading dataset...")
log.append(f"Downloading dataset from {hf_dataset_repo_id}...")
try:
# Download the dataset files
local_dataset_path = "./downloaded_dataset_files"
# Correctly specify repo_type as "dataset"
snapshot_download(
repo_id=hf_dataset_repo_id,
local_dir=local_dataset_path,
repo_type="dataset", # Important! Specifies this is a dataset repo
token=hf_token if hf_token and hf_token.strip() else None, # Use token for auth
        )
log.append(f"Dataset files downloaded to {local_dataset_path}")
# Check the structure of the downloaded files
log.append("Checking downloaded dataset structure...")
downloaded_files = glob.glob(f"{local_dataset_path}/**/*.pt", recursive=True)
log.append(f"Found {len(downloaded_files)} .pt files in the dataset directory")
# Look for the pairs directory (we know this exists from the log)
pairs_dir = os.path.join(local_dataset_path, "final_rvq_pairs")
log.append(f"Using pairs directory: {pairs_dir}")
pt_files = glob.glob(f"{pairs_dir}/*.pt")
log.append(f"Found {len(pt_files)} .pt files in pairs directory")
# Load the dataset from the files
progress(0.5, desc="Loading pairs from dataset files...")
log.append("Loading dataset pairs...")
try:
# Load a single file first to understand its structure
            sample_file = pt_files[0]
            sample_data = torch.load(sample_file, map_location="cpu")
log.append(f"Sample data type: {type(sample_data)}")
# Function to recursively explore the data structure
def explore_data(data, prefix=""):
if isinstance(data, (list, tuple)):
log.append(f"{prefix}List/Tuple with {len(data)} items")
if len(data) > 0:
explore_data(data[0], prefix + " [0]: ")
elif isinstance(data, dict):
log.append(f"{prefix}Dictionary with keys: {list(data.keys())}")
for key in list(data.keys())[:2]: # Look at first 2 keys
explore_data(data[key], prefix + f" ['{key}']: ")
elif isinstance(data, torch.Tensor):
log.append(f"{prefix}Tensor with shape {data.shape} and dtype {data.dtype}")
else:
log.append(f"{prefix}Other type: {type(data)}")
# Explore the sample data
explore_data(sample_data, "Sample data: ")
# Function to extract tensor data from complex structures
def extract_tensor_data(data):
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, (list, tuple)) and len(data) > 0:
if all(isinstance(item, (int, float)) for item in data):
return torch.tensor(data)
# For lists of tensors/complex structures, use the first item
return extract_tensor_data(data[0])
elif isinstance(data, dict):
# Try common keys for input data
for key in ['input_ids', 'prompt', 'source', 'inputs', 'data']:
if key in data:
return extract_tensor_data(data[key])
# If none found, use the first key
if len(data) > 0:
return extract_tensor_data(next(iter(data.values())))
return None
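            # Illustrative traversal (assumed file layout): for
            #   {"input_ids": [[1, 2, 3]]}
            # the helper recurses dict -> outer list -> inner list of ints and
            # returns torch.tensor([1, 2, 3]).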
# Process all files
input_ids_list = []
labels_list = []
# Capture any errors for later analysis
file_errors = []
for i, pt_file in enumerate(tqdm(pt_files, desc="Loading .pt files")):
try:
                    data = torch.load(pt_file, map_location="cpu")
if isinstance(data, (list, tuple)) and len(data) >= 2:
# Standard format: list/tuple with [input, label]
input_tensor = extract_tensor_data(data[0])
label_tensor = extract_tensor_data(data[1])
if input_tensor is not None and label_tensor is not None:
input_ids_list.append(input_tensor)
labels_list.append(label_tensor)
else:
file_errors.append(f"Could not extract tensors from {pt_file}")
else:
log.append(f"File {pt_file} has unexpected format. Skipping.")
file_errors.append(f"Unexpected format in {pt_file}: {type(data)}")
except Exception as e:
file_errors.append(f"Error processing file {pt_file}: {str(e)}")
# Log errors if any
if file_errors:
log.append(f"Encountered {len(file_errors)} errors during file processing:")
for i, error in enumerate(file_errors[:5]): # Log first 5 errors
log.append(f" Error {i+1}: {error}")
if len(file_errors) > 5:
log.append(f" ...and {len(file_errors) - 5} more errors")
log.append(f"Successfully processed {len(input_ids_list)} input/label pairs")
# Verify all tensors are valid
valid_pairs = []
for i, (inputs, labels) in enumerate(zip(input_ids_list, labels_list)):
# Perform safety checks on tensors
if not isinstance(inputs, torch.Tensor) or not isinstance(labels, torch.Tensor):
log.append(f"Pair {i}: Invalid tensor types - skipping")
continue
# Ensure tensors contain integers
try:
inputs = inputs.long()
labels = labels.long()
# Convert to lists and add to valid pairs
valid_pairs.append((inputs.tolist(), labels.tolist()))
# Log some diagnostics for the first few pairs
if i < 3:
log.append(f"Pair {i}: Input shape: {inputs.shape}, Label shape: {labels.shape}")
except Exception as e:
log.append(f"Error converting tensors for pair {i}: {str(e)}")
# Create the dataset
log.append(f"Creating dataset from {len(valid_pairs)} valid pairs...")
processed_inputs = [pair[0] for pair in valid_pairs]
processed_labels = [pair[1] for pair in valid_pairs]
dataset = Dataset.from_dict({
"input_ids": processed_inputs,
"labels": processed_labels
})
# Split into training and validation
train_test_split = dataset.train_test_split(test_size=0.05)
train_dataset = train_test_split["train"]
val_dataset = train_test_split["test"]
log.append(f"Created dataset with {len(train_dataset)} training examples and {len(val_dataset)} validation examples")
except Exception as e:
            error_msg = f"Error processing dataset: {str(e)}\n{traceback.format_exc()}"
log.append(error_msg)
return "\n".join(log)
except Exception as e:
error_msg = f"Error loading dataset: {str(e)}"
log.append(error_msg)
return "\n".join(log)
# --- Training Arguments ---
progress(0.75, desc="Setting up training arguments...")
output_dir = f"./results_{model_repo_name}"
os.makedirs(output_dir, exist_ok=True)
# For 1B model on A100, we can increase batch size and reduce gradient accumulation
training_args = TrainingArguments(
output_dir=output_dir,
num_train_epochs=float(epochs),
        per_device_train_batch_size=int(batch_size),  # honor the UI value (default 8 for the 1B model)
        gradient_accumulation_steps=int(grad_accum_steps),  # honor the UI value (default 1 for the 1B model)
learning_rate=learning_rate,
weight_decay=0.01,
logging_dir=f"{output_dir}/logs",
logging_steps=10,
save_steps=50,
save_total_limit=3,
remove_unused_columns=False,
push_to_hub=False,
disable_tqdm=False,
warmup_ratio=0.03,
lr_scheduler_type="cosine",
report_to="tensorboard",
        bf16=torch.cuda.is_bf16_supported(),
fp16=False, # Using BF16 instead
gradient_checkpointing=True, # Still useful for efficiency
gradient_checkpointing_kwargs={'use_reentrant': False},
ddp_find_unused_parameters=False,
deepspeed=ds_config_path if n_gpus > 1 else None, # Only use DeepSpeed for multi-GPU
)
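    # Effective batch size = per_device_train_batch_size * gradient
    # accumulation steps * n_gpus; the UI defaults (8 * 1) on one GPU give 8
    # sequences per optimizer step.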
# --- Initialize Trainer ---
progress(0.8, desc="Initializing trainer...")
    trainer = Trainer(
        model=model_to_train,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=val_dataset,
        data_collator=seq2seq_causal_collator,  # concatenate context + target, mask context loss
    )
log.append("Trainer initialized for training.")
# --- Start Training ---
# Clear cache before starting
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
try:
progress(0.85, desc="Starting training...")
log.append("Starting training...")
train_result = trainer.train()
progress(0.95, desc="Saving model...")
# Save final model (adapter weights) and training state
final_save_path = os.path.join(training_args.output_dir, "final_checkpoint")
log.append(f"Saving final model checkpoint to {final_save_path}...")
trainer.save_model(final_save_path)
trainer.save_state()
# Log metrics
metrics = train_result.metrics
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
for key, value in metrics.items():
log.append(f"{key}: {value}")
except Exception as e:
error_msg = f"An error occurred during training: {e}"
log.append(error_msg)
return "\n".join(log)
progress(1.0, desc="Training complete!")
log.append("Training process complete.")
return "\n".join(log)
# Define the Gradio interface
def create_interface():
with gr.Blocks(title="Llama 3.2 1B RVQ Fine-tuning") as demo:
gr.Markdown("# Llama 3.2 1B RVQ LoRA Fine-tuning")
gr.Markdown("Fine-tune a Llama 3.2 1B model with RVQ token embeddings using LoRA")
with gr.Row():
with gr.Column():
hf_username = gr.Textbox(label="HuggingFace Username", value="Twelve2five")
model_repo = gr.Textbox(label="Model Repository Name", value="llama-3.2-1b-rvq")
dataset_repo = gr.Textbox(label="Dataset Repository Name", value="podcast-dialogue-rvq-pairs-3items")
hf_token = gr.Textbox(
label="Hugging Face Token (Optional)",
placeholder="Enter your HF token to access gated models",
type="password"
)
with gr.Column():
epochs = gr.Number(label="Number of Epochs", value=3, minimum=1, maximum=10)
batch_size = gr.Number(label="Batch Size per Device", value=8, minimum=1, maximum=16)
grad_accum = gr.Number(label="Gradient Accumulation Steps", value=1, minimum=1, maximum=16)
lr = gr.Number(label="Learning Rate", value=2e-4)
start_btn = gr.Button("Start Training")
output = gr.Textbox(label="Training Log", lines=20)
start_btn.click(
fn=train_model,
inputs=[hf_username, model_repo, dataset_repo, epochs, batch_size, grad_accum, lr, hf_token],
outputs=output
)
return demo
# Create and launch the interface
demo = create_interface()
if __name__ == "__main__":
demo.launch()