Kai Izumoto committed on
Commit
608b73e
·
verified ·
1 Parent(s): cd7ecf9

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -0
app.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Checkpoint identifier; weights are fetched directly from the Hugging Face Hub.
# NOTE(review): checkpoint name says FP8 while torch_dtype requests float16 —
# confirm the intended load precision.
model_name = "Qwen/Qwen3-235B-A22B-Thinking-2507-FP8"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",       # let accelerate place weights on GPU(s) when available
    torch_dtype="float16",
    low_cpu_mem_usage=True,  # stream weights in to keep peak host RAM down
)
13
+
14
def generate(prompt):
    """Generate a text completion for *prompt* with the loaded model.

    Args:
        prompt: Raw user text; tokenized as-is (no chat template is applied).

    Returns:
        The decoded generation, which includes the prompt as a prefix
        (``skip_special_tokens=True`` strips only special tokens).
    """
    # Fix: move inputs to wherever device_map placed the model instead of
    # hard-coding "cuda" — the original crashed on CPU-only hosts and could
    # mismatch the device chosen by device_map="auto".
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=500,
        # Sampling hyperparameters kept from the original app.
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
24
+
25
# Minimal text-in/text-out UI; Hugging Face Spaces launches the `demo` object.
demo = gr.Interface(
    fn=generate,
    inputs="text",
    outputs="text",
)
demo.launch()