"""Gradio Space: text generation with google/gemma-3-27b-it.

Loads the gated Gemma model using a token supplied via the Space's
``HUGGINGFACE_TOKEN`` secret, wraps it in a text-generation pipeline,
and serves a minimal prompt -> completion UI.
"""

import os

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, TextGenerationPipeline

# Token from the Secret configured in the Space settings.
# NOTE(review): if the secret is unset this is None and loading the gated
# model below will fail with an auth error — confirm the secret is configured.
hf_token = os.environ.get("HUGGINGFACE_TOKEN")

# Load tokenizer and model (authenticated); device_map="auto" lets
# accelerate place the 27B weights across available devices.
model_id = "google/gemma-3-27b-it"
tokenizer = AutoTokenizer.from_pretrained(model_id, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    token=hf_token,
    device_map="auto",
)

# Build the inference pipeline once at startup so each request reuses it.
pipe = TextGenerationPipeline(model=model, tokenizer=tokenizer)


def generate(prompt: str) -> str:
    """Generate a sampled continuation of *prompt*.

    Args:
        prompt: User-supplied text to continue.

    Returns:
        The generated text (pipeline output includes the prompt prefix
        by default).
    """
    output = pipe(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
    return output[0]["generated_text"]


# Gradio UI — launched at import time, as Hugging Face Spaces expects.
gr.Interface(
    fn=generate,
    inputs=gr.Text(label="Enter your prompt"),
    outputs=gr.Textbox(label="Generated Text"),
    title="Gemma-3-27B Text Generation",
).launch()