from fastapi import FastAPI, HTTPException
from fastapi.responses import HTMLResponse
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI(title="Chatbot")

# Load the chatbot model (DialoGPT-medium) at startup
model_name = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Function to generate chatbot response
def get_chatbot_response(user_input: str, max_length=100):
    if not user_input:
        return "Please say something!"

    # Encode the input and generate a response
    input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    chat_history_ids = model.generate(
        input_ids,
        max_length=max_length,
        pad_token_id=tokenizer.eos_token_id,
        no_repeat_ngram_size=3,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.8
    )

    # Decode the response, skipping the input part
    response = tokenizer.decode(chat_history_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    return response.strip()

# HTML, CSS, and JS embedded
HTML_CONTENT = """