import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned model and tokenizer
model_path = "./finetuned_codegen"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float32)

# Set the padding token
tokenizer.pad_token = tokenizer.eos_token

# Move the model to CPU
device = torch.device("cpu")
model.to(device)

# Test prompts
prompts = [
    "Write a Python program to print 'Hello, your name or any other thing!'"
]

# Generate code for each prompt
for prompt in prompts:
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=128).to(device)
    outputs = model.generate(
        **inputs,
        max_length=200,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
        temperature=0.7,
        top_p=0.9
    )
    generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    print(f"Prompt: {prompt}\nGenerated Code:\n{generated_code}\n{'-'*50}")
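
# Optional follow-up (a minimal sketch, not part of the original script):
# decoder-only models echo the prompt in `outputs`, so slicing off the prompt
# tokens before decoding returns only the generated continuation. This reuses
# the `inputs` and `outputs` variables left over from the loop above.
prompt_length = inputs["input_ids"].shape[1]
completion_only = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True)
print(f"Completion only:\n{completion_only}")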