import gradio as gr
import requests

# Replace with your actual ngrok URL from Colab
COLAB_BACKEND_URL = "https://64d0-34-124-237-140.ngrok-free.app/generate"


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Construct the prompt with chat history and system prompt
    full_prompt = system_message.strip() + "\n\n"
    for user_msg, bot_msg in history:
        if user_msg:
            full_prompt += f"User: {user_msg.strip()}\n"
        if bot_msg:
            full_prompt += f"AI: {bot_msg.strip()}\n"
    full_prompt += f"User: {message.strip()}\nAI:"

    try:
        # Send the prompt and generation parameters to the Colab backend
        response = requests.post(
            COLAB_BACKEND_URL,
            json={
                "prompt": full_prompt,
                "max_tokens": max_tokens,
                "temperature": temperature,
                "top_p": top_p,
            },
        )
        reply = response.json().get("response", "")
        yield reply.strip()
    except Exception as e:
        yield f"[Error contacting backend: {str(e)}]"


# Gradio interface
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a flirty, romantic AI girlfriend.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.95, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)

if __name__ == "__main__":
    demo.launch()
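
The request body above implies a simple JSON contract on the Colab side: a POST to /generate carrying prompt, max_tokens, temperature, and top_p, answered with a JSON object whose "response" key holds the generated text. Below is a minimal sketch of what that backend could look like, assuming a Flask app tunneled through ngrok and a Hugging Face text-generation pipeline; the model name, port, and default values here are placeholders for illustration, not the actual notebook setup.

# Colab-side sketch (run in the notebook, not in the Space): a hypothetical
# Flask endpoint matching the JSON contract used by the frontend above.
from flask import Flask, request, jsonify
from transformers import pipeline

app = Flask(__name__)
generator = pipeline("text-generation", model="gpt2")  # placeholder model

@app.route("/generate", methods=["POST"])
def generate():
    data = request.get_json()
    text = generator(
        data["prompt"],
        max_new_tokens=int(data.get("max_tokens", 256)),
        temperature=float(data.get("temperature", 0.7)),
        top_p=float(data.get("top_p", 0.95)),
        do_sample=True,
        return_full_text=False,  # return only the newly generated text
    )[0]["generated_text"]
    # The frontend reads the "response" key from this JSON payload.
    return jsonify({"response": text})

app.run(port=5000)  # expose this port through ngrok to get the public URL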