File size: 1,346 Bytes
13ad507
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import gradio as gr
from transformers import AutoTokenizer, pipeline
import torch

# Load the model and tokenizer
# `model` holds the Hub repo id; it is passed by name to both the tokenizer
# and the pipeline below, which download/load the weights on first use.
model = "K00B404/DeepQwenScalerPlus"
tokenizer = AutoTokenizer.from_pretrained(model)

# Initialize the pipeline for text generation
# NOTE(review): this rebinding shadows the imported `transformers.pipeline`
# function — harmless here since it is only called once, but renaming the
# result (e.g. `generator`) would be cleaner; callers below use this global.
# fp16 weights + device_map="auto" spread the model across available devices.
pipeline = pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Function to interact with the model
def generate_response(user_message):
    """Generate a model reply for *user_message* using the chat template.

    Wraps the user's text in a system+user message pair, renders it with the
    tokenizer's chat template, and samples a completion from the module-level
    `pipeline`.

    Args:
        user_message: The raw question/text typed by the user.

    Returns:
        The newly generated text only (the prompt is not echoed back).
    """
    messages = [
        {"role": "system", "content": "You are a reasoning coder and specialize in generating Python scripts"},
        {"role": "user", "content": user_message},
    ]

    # Render the conversation into the model's expected prompt format.
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

    # Sample a completion. return_full_text=False fixes the original bug of
    # the chat-template prompt being prepended to every response shown in the UI.
    outputs = pipeline(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
        return_full_text=False,
    )

    return outputs[0]["generated_text"]

# Gradio interface: wire the generator into a single-textbox web UI.
question_box = gr.Textbox(label="Ask a Question", placeholder="Enter your question here...")
answer_box = gr.Textbox(label="Generated Response")

iface = gr.Interface(
    fn=generate_response,
    inputs=question_box,
    outputs=answer_box,
    title="DeepQwenScalerPlus Gradio App",
    description="Interact with the DeepQwenScalerPlus model to get Python script generation responses.",
)

# Start the local web server (blocks until shut down).
iface.launch()