# Hugging Face Space app.py (author: rishiraj, commit 6954118 verified)
import spaces
import gradio as gr
from transformers import pipeline
import torch
# Load MedGemma as a chat-capable text-generation pipeline on the GPU.
# bfloat16 halves the memory footprint vs. float32 while remaining
# numerically stable for inference.
pipe = pipeline(
    task="text-generation",
    model="google/medgemma-27b-text-it",
    device="cuda",
    torch_dtype=torch.bfloat16,
)
@spaces.GPU
def generate_response(system_prompt, user_prompt, max_new_tokens=2048):
    """Generate a reply from the MedGemma pipeline for one chat turn.

    Args:
        system_prompt: System instruction steering the model's persona.
        user_prompt: The user's question or message.
        max_new_tokens: Cap on generated tokens (default 2048, preserving
            the previous hard-coded behavior).

    Returns:
        The assistant's reply text — the ``content`` of the final message
        appended by the pipeline.
    """
    # Chat-format input; the pipeline applies the model's chat template.
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    output = pipe(messages, max_new_tokens=max_new_tokens)
    # The pipeline returns [{"generated_text": [...input msgs..., assistant_msg]}];
    # the last list element is the newly generated assistant message.
    return output[0]["generated_text"][-1]["content"]
# Assemble the Gradio front end: two text inputs mapped to one text output.
system_box = gr.Textbox(
    label="System Prompt",
    value="You are a helpful medical assistant.",
)
user_box = gr.Textbox(
    label="User Prompt",
    placeholder="Enter your question here...",
)
answer_box = gr.Textbox(label="Generated Response")

demo = gr.Interface(
    fn=generate_response,
    inputs=[system_box, user_box],
    outputs=answer_box,
    title="MedGemma Medical Assistant",
    description="Enter a system and user prompt to generate a medically-informed response.",
)

# Start the web server for the Space.
demo.launch()