import torch
from transformers import AutoModel, AutoTokenizer
import matplotlib.pyplot as plt


def analyze_model(model_path):
    """Load a model and print a summary of its architecture."""
    model = AutoModel.from_pretrained(model_path)
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    print("=== Model Architecture ===")
    print(f"Model parameters: {sum(p.numel() for p in model.parameters()):,}")
    # BERT-style encoders expose their transformer blocks as model.encoder.layer
    print(f"Layers: {len(model.encoder.layer) if hasattr(model, 'encoder') else 'N/A'}")
    # Report the attention-head count of the first encoder layer
    if hasattr(model, 'encoder'):
        layer = model.encoder.layer[0]
        print(f"Attention heads: {layer.attention.self.num_attention_heads}")
    return model, tokenizer


def plot_training_metrics(log_file='training.log'):
    # Parse training logs and plot the loss curve. Sketch only: assumes each
    # log line looks like "step=100 loss=2.34"; adapt to your actual format.
    steps, losses = [], []
    with open(log_file) as f:
        for line in f:
            if not line.startswith('step='):
                continue
            fields = dict(part.split('=', 1) for part in line.split())
            steps.append(int(fields['step']))
            losses.append(float(fields['loss']))
    plt.plot(steps, losses)
    plt.xlabel('step')
    plt.ylabel('loss')
    plt.show()
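

# The head count above describes the architecture only. Below is a minimal
# sketch of extracting the attention weights themselves, assuming a
# BERT-style model; inspect_attention and the sample sentence are
# illustrative additions, not part of the original script. Passing
# output_attentions=True returns one tensor per layer of shape
# (batch, heads, seq_len, seq_len).
def inspect_attention(model, tokenizer, text="The quick brown fox jumps."):
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs, output_attentions=True)
    attentions = outputs.attentions  # tuple of per-layer attention tensors
    print(f"Layers with attention: {len(attentions)}")
    print(f"Per-layer shape: {tuple(attentions[0].shape)}")
    return attentions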
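

# Example usage; "bert-base-uncased" is a placeholder checkpoint.
if __name__ == "__main__":
    model, tokenizer = analyze_model("bert-base-uncased")
    inspect_attention(model, tokenizer)
    # plot_training_metrics()  # uncomment once a training.log exists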