import torch
from transformers import AutoModel, AutoTokenizer
import matplotlib.pyplot as plt


def analyze_model(model_path):
    """Load a pretrained model and print a short summary of its architecture."""
    model = AutoModel.from_pretrained(model_path)
    tokenizer = AutoTokenizer.from_pretrained(model_path)

    print("=== Model Architecture ===")
    print(f"Model parameters: {sum(p.numel() for p in model.parameters()):,}")
    # Encoder-style models (e.g. BERT) expose their transformer blocks as model.encoder.layer
    print(f"Layers: {len(model.encoder.layer) if hasattr(model, 'encoder') else 'N/A'}")

    # Analyze attention configuration of the first encoder layer
    if hasattr(model, 'encoder'):
        layer = model.encoder.layer[0]
        print(f"Attention heads: {layer.attention.self.num_attention_heads}")

    return model, tokenizer


def plot_training_metrics(log_file='training.log'):
    # Parse training logs and create plots
    # This would read your training logs and create nice graphs
    pass
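

# --- Sketch: one possible implementation of plot_training_metrics ---
# The real log format isn't shown above, so this is only a minimal sketch that
# ASSUMES whitespace-separated "key=value" pairs per line, e.g.
#   step=100 loss=2.345 lr=5e-5
# The function name and parsing scheme below are hypothetical, not the
# script's actual logging convention; adapt the parsing to your log format.
def plot_training_metrics_sketch(log_file='training.log'):
    """Plot training loss vs. step from an assumed key=value log format."""
    steps, losses = [], []
    with open(log_file) as f:
        for line in f:
            # Turn "step=100 loss=2.345 ..." into {'step': '100', 'loss': '2.345', ...}
            fields = dict(item.split('=', 1) for item in line.split() if '=' in item)
            if 'step' in fields and 'loss' in fields:
                steps.append(int(fields['step']))
                losses.append(float(fields['loss']))

    plt.figure(figsize=(8, 4))
    plt.plot(steps, losses, label='training loss')
    plt.xlabel('step')
    plt.ylabel('loss')
    plt.legend()
    plt.tight_layout()
    plt.savefig('training_loss.png')  # or plt.show() for interactive use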