Spaces: Running on Zero
File size: 1,018 Bytes
Commit: d2c40eb (parent 6954118)
import spaces
import gradio as gr
from transformers import pipeline
import torch
# Build the text-generation pipeline once at import time so every request
# reuses the loaded model. bfloat16 halves memory versus float32; the model
# is placed on the GPU ("cuda").
pipe = pipeline(
    task="text-generation",
    model="google/medgemma-27b-text-it",
    device="cuda",
    torch_dtype=torch.bfloat16,
)
@spaces.GPU
def generate_response(system_prompt, user_prompt):
    """Run the chat pipeline on a system/user prompt pair and return the model's reply text.

    The pipeline returns a list of generations; each generation's
    "generated_text" is the full conversation, whose last message is the
    assistant's answer.
    """
    conversation = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    generations = pipe(conversation, max_new_tokens=2048)
    last_message = generations[0]["generated_text"][-1]
    return last_message["content"]
# Assemble the web UI: two text inputs (system + user prompt) feeding
# generate_response, with a single text output for the model's answer.
demo = gr.Interface(
    fn=generate_response,
    title="MedGemma Medical Assistant",
    description="Enter a system and user prompt to generate a medically-informed response.",
    inputs=[
        gr.Textbox(label="System Prompt", value="You are a helpful medical assistant."),
        gr.Textbox(label="User Prompt", placeholder="Enter your question here..."),
    ],
    outputs=gr.Textbox(label="Generated Response"),
)
# Launch the app (the stray trailing "|" from the copied web page was a
# SyntaxError and has been removed).
demo.launch()