import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
import torch
import threading

# Lightweight model for the CPU Basic tier (phi-2, 2.7B parameters)
model_id = "microsoft/phi-2"

# Load tokenizer & model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float32,  # float16 matmuls are unreliable/slow on CPU
    low_cpu_mem_usage=True
)

# Streaming generation: run model.generate in a background thread and yield
# the accumulated text as the streamer receives new tokens
def generate_reply(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )
    generation_kwargs = dict(
        **inputs,
        streamer=streamer,
        max_new_tokens=256,
        do_sample=True,  # required for temperature/top_p to take effect
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.1
    )
    thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    partial_text = ""
    for new_text in streamer:
        partial_text += new_text
        yield partial_text
    thread.join()

# Chatbot function: rebuild the prompt from the conversation history and
# stream the reply into the last (still unanswered) history entry
def chat_fn(history):
    message = history[-1][0]
    prompt = ""
    for user, bot in history[:-1]:
        prompt += f"User: {user}\nBot: {bot}\n"
    prompt += f"User: {message}\nBot:"

    for partial in generate_reply(prompt):
        # The model may start inventing the next "User:" turn; keep only the reply
        reply = partial.split("User:")[0].strip()
        history[-1][1] = reply
        yield history, history

# Official logo URL
logo_url = "https://kliacustoms.net/gudang/logo.jpg"

# UI with a blue theme
with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="blue")) as demo:
    gr.HTML(f"""
    <div style='text-align:center;padding:20px;background:#002b80;color:white;border-radius:10px;'>
        <img src='{logo_url}' alt='Logo Kastam Malaysia' width='120' 
             style='margin-bottom:10px;display:block;margin-inline:auto;' />
        <h1>🇲🇾 KastamGPT-KLIA</h1>
        <p>Chatbot Eksperimen untuk simulasi kastam di KLIA</p>
    </div>
    """)

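    # Chat area: conversation window, input box, and clear button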
    with gr.Row():
        with gr.Column(scale=1):
            chatbot = gr.Chatbot(
                height=500,
                label="Perbualan",
                bubble_full_width=False,
                avatar_images=("🧑🏻‍💼", "🤖")
            )
            msg = gr.Textbox(placeholder="Tanya soalan di sini...", label="Input")
            clear = gr.Button("🧹 Clear Chat")

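    # Conversation history shared between callbacks, as a list of [user, bot] pairs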
    state = gr.State([])

    # Append the new user turn to the history and clear the textbox
    def user_message(user_msg, history):
        return "", history + [[user_msg, None]]

    # Two-step chain: record the user turn first, then stream the bot reply.
    # chat_fn reads the pending message from the history, because the textbox
    # has already been cleared by the time it runs.
    msg.submit(user_message, [msg, state], [msg, state]).then(
        chat_fn, state, [chatbot, state]
    )
    clear.click(lambda: ([], []), None, [chatbot, state], queue=False)

demo.launch()