import json

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "winninghealth/WiNGPT-Babel"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)

def translate(text):
    # The system prompt "中英互译下面的内容" asks the model to translate the
    # following content between Chinese and English.
    prompt = f"<|im_start|>system\n中英互译下面的内容<|im_end|>\n<|im_start|>user\n{text}<|im_end|>\n<|im_start|>assistant\n"
    inputs = tokenizer([prompt], return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    # Decode only the newly generated tokens so the prompt is not echoed back.
    translated_text = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    return translated_text

def custom_api(text_list, source_lang, target_lang):
    # The Textbox delivers the text list as a JSON string, e.g. '["Hello", "World"]'.
    if isinstance(text_list, str):
        try:
            text_list = json.loads(text_list)
        except json.JSONDecodeError:
            text_list = [text_list]

    # The model only supports translation between Chinese and English.
    if {source_lang, target_lang} == {"zh-CN", "en"}:
        translated_list = [translate(text) for text in text_list]
    else:
        return {"error": "Unsupported language pair"}

    return {"translations": [{"detected_source_lang": source_lang, "text": translated_text} for translated_text in translated_list]}

# Create the Gradio interface
iface = gr.Interface(
    fn=custom_api,
    inputs=[
        gr.Textbox(lines=5, label="Input text list (Chinese-English translation)", placeholder='["Hello", "World"]'),
        gr.Textbox(label="Source language", placeholder="zh-CN"),
        gr.Textbox(label="Target language", placeholder="en")
    ],
    outputs=gr.JSON(label="Translation result"),
    title="WiNGPT-Babel Translation Demo",
    description="A translation demo based on the WiNGPT-Babel model. Supports Chinese-English translation.",
)

iface.launch()
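
# A minimal sketch of calling this demo programmatically once it is running.
# Assumptions (not part of the original script): the app is reachable at the
# default local address http://127.0.0.1:7860, the `gradio_client` package is
# installed, and "/predict" is the default endpoint name Gradio assigns to a
# single gr.Interface.
#
# from gradio_client import Client
#
# client = Client("http://127.0.0.1:7860/")
# result = client.predict(
#     '["Hello", "World"]',  # text list as a JSON string
#     "en",                  # source language
#     "zh-CN",               # target language
#     api_name="/predict",
# )
# print(result)  # e.g. {"translations": [{"detected_source_lang": "en", "text": "..."}, ...]}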