thanglekdi committed on
Commit
da5015b
·
1 Parent(s): c53a8fe
Files changed (4)
  1. app.py +3 -61
  2. git.txt +4 -0
  3. phoGPT.py +58 -0
  4. test.py +64 -0
app.py CHANGED
@@ -1,70 +1,12 @@
  # app.py
- import torch
  import gradio as gr
- from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
-
- # 1️⃣ Configure and load the model + tokenizer
- model_path = "vinai/PhoGPT-4B-Chat"
-
- config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
- config.init_device = "cpu"
-
- model = AutoModelForCausalLM.from_pretrained(model_path, config=config, trust_remote_code=True)
- model.eval()
- tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
-
-
- def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
-     # 2.1 — Gather the system message and chat history into a messages list
-     messages = [{"role": "system", "content": system_message}]
-     for u, b in history:
-         if u:
-             messages.append({"role": "user", "content": u})
-         if b:
-             messages.append({"role": "assistant", "content": b})
-     messages.append({"role": "user", "content": message})
-
-     # 2.2 — Build the prompt with the model's chat template
-     input_prompt = tokenizer.apply_chat_template(
-         messages,
-         tokenize=False,
-         add_generation_prompt=True
-     )
-
-     # 2.3 — Tokenize (the model stays on CPU, so no device move is needed)
-     input_ids = tokenizer(input_prompt, return_tensors="pt")
-
-     # 2.4 — Generate text
-     outputs = model.generate(
-         inputs=input_ids["input_ids"],
-         max_new_tokens=max_tokens,
-         temperature=temperature,
-         top_p=top_p,
-         do_sample=True,
-         eos_token_id=tokenizer.eos_token_id,
-         pad_token_id=tokenizer.pad_token_id,
-     )
-
-     # 2.5 — Decode and keep only the assistant's reply
-     response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
-     response = response.split("### Trả lời:")[-1].strip()
-     return response
+ import phoGPT
+
+ # Bind the handler itself; calling phoGPT.respond() with no arguments would raise a TypeError
+ respond = phoGPT.respond

  # 3️⃣ Gradio interface
  demo = gr.ChatInterface(
-     respond,
+     respond,  # chat response handler
      additional_inputs=[
          gr.Textbox("Bạn là một chatbot tiếng Việt thân thiện.", label="System message"),
          gr.Slider(1, 2048, value=512, step=1, label="Max new tokens"),
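The diff above is truncated after the "Max new tokens" slider. For reference, a minimal sketch of the full refactored app.py, assuming the remaining sliders mirror the test.py template (the temperature and top-p controls below are assumptions, not part of this commit):

# app.py: sketch of the refactored entry point
import gradio as gr
import phoGPT

demo = gr.ChatInterface(
    phoGPT.respond,  # pass the function itself; ChatInterface calls it per message
    additional_inputs=[
        gr.Textbox("Bạn là một chatbot tiếng Việt thân thiện.", label="System message"),
        gr.Slider(1, 2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(0.1, 4.0, value=0.7, step=0.1, label="Temperature"),  # assumed
        gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),  # assumed
    ],
)

if __name__ == "__main__":
    demo.launch()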
git.txt ADDED
@@ -0,0 +1,4 @@
+ git commit -am "Update space"; git push origin main
+
+ git add test.py
+ git rm test.py
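A note on the scratch commands above: git rm test.py deletes the file from both the index and the working tree, while git add test.py stages it. If the goal is only to stop tracking the file while keeping it on disk, the cached variant is the usual pattern:

git rm --cached test.py
echo "test.py" >> .gitignore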
phoGPT.py ADDED
@@ -0,0 +1,58 @@
+ import torch
+ from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
+
+ # 1️⃣ Configure and load the model + tokenizer
+ model_path = "vinai/PhoGPT-4B-Chat"
+
+ config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
+ config.init_device = "cpu"
+
+ # Pass the config so that init_device = "cpu" actually takes effect
+ model = AutoModelForCausalLM.from_pretrained(model_path, config=config, trust_remote_code=True)
+ model.eval()
+ tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
+
+
+ def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
+     # 2.1 — Gather the system message and chat history into a messages list
+     messages = [{"role": "system", "content": system_message}]
+     for u, b in history:
+         if u:
+             messages.append({"role": "user", "content": u})
+         if b:
+             messages.append({"role": "assistant", "content": b})
+     messages.append({"role": "user", "content": message})
+
+     # 2.2 — Build the prompt with the model's chat template
+     input_prompt = tokenizer.apply_chat_template(
+         messages,
+         tokenize=False,
+         add_generation_prompt=True
+     )
+
+     # 2.3 — Tokenize (the model stays on CPU, so no device move is needed)
+     input_ids = tokenizer(input_prompt, return_tensors="pt")
+
+     # 2.4 — Generate text
+     outputs = model.generate(
+         inputs=input_ids["input_ids"],
+         max_new_tokens=max_tokens,
+         temperature=temperature,
+         top_p=top_p,
+         do_sample=True,
+         eos_token_id=tokenizer.eos_token_id,
+         pad_token_id=tokenizer.pad_token_id,
+     )
+
+     # 2.5 — Decode and keep only the assistant's reply. PhoGPT's template
+     # marks the answer with "### Trả lời:", so split on that marker
+     # defensively ([-1] avoids an IndexError if it is ever absent);
+     # ChatInterface manages the history itself, so only the reply is returned.
+     response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
+     response = response.split("### Trả lời:")[-1].strip()
+     return response
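Since respond now lives in its own module, it can be smoke-tested without starting Gradio. A minimal sketch, assuming the (user, assistant) tuple history format that gr.ChatInterface passes in (the file name smoke_test.py and all argument values are illustrative):

# smoke_test.py: call phoGPT.respond directly
import phoGPT

history = [("Xin chào!", "Chào bạn! Mình có thể giúp gì?")]
reply = phoGPT.respond(
    "Thủ đô của Việt Nam là gì?",  # "What is the capital of Vietnam?"
    history,
    "Bạn là một chatbot tiếng Việt thân thiện.",  # system message
    128,   # max_tokens
    0.7,   # temperature
    0.95,  # top_p
)
print(reply)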
test.py ADDED
@@ -0,0 +1,64 @@
+ import gradio as gr  # type: ignore
+ from huggingface_hub import InferenceClient  # type: ignore
+
+ """
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+ """
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+
+ def respond(
+     message,
+     history: list[tuple[str, str]],
+     system_message,
+     max_tokens,
+     temperature,
+     top_p,
+ ):
+     messages = [{"role": "system", "content": system_message}]
+
+     for val in history:
+         if val[0]:
+             messages.append({"role": "user", "content": val[0]})
+         if val[1]:
+             messages.append({"role": "assistant", "content": val[1]})
+
+     messages.append({"role": "user", "content": message})
+
+     response = ""
+
+     # Stream tokens and yield the growing reply; "chunk" avoids shadowing the
+     # "message" parameter, and the None check guards the final empty delta
+     for chunk in client.chat_completion(
+         messages,
+         max_tokens=max_tokens,
+         stream=True,
+         temperature=temperature,
+         top_p=top_p,
+     ):
+         token = chunk.choices[0].delta.content
+         if token:
+             response += token
+         yield response
+
+
+ """
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+ """
+ demo = gr.ChatInterface(
+     respond,
+     additional_inputs=[
+         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+         gr.Slider(
+             minimum=0.1,
+             maximum=1.0,
+             value=0.95,
+             step=0.05,
+             label="Top-p (nucleus sampling)",
+         ),
+     ],
+ )
+
+
+ if __name__ == "__main__":
+     demo.launch()
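test.py is the stock Gradio ChatInterface streaming template and calls the serverless Inference API anonymously, which is rate-limited. A small sketch of authenticating the client instead, assuming a token exported in an HF_TOKEN environment variable (the variable name is an assumption):

import os
from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_TOKEN"))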