import gradio as gr
from groq import Groq
import os
from PIL import Image
import base64
import uuid
import time

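# ShfraAI: a Gradio chat interface that sends user text (and optionally
# uploaded images, encoded as base64 data URLs) to the Groq chat completions API.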
# Read the API key from the environment rather than hardcoding a secret in the source
API_KEY = os.environ.get("GROQ_API_KEY")
client = Groq(api_key=API_KEY)

# Set up the upload folder and bot avatar
UPLOAD_FOLDER = "./uploaded_images"
BOT_AVATAR = "https://shfra.netlify.app/imeg.png"
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# Image settings
IMAGE_WIDTH = 1111

def preprocess_image(image: Image.Image) -> Image.Image:
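    # Scale to a fixed width while keeping the aspect ratio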
    image_height = int(image.height * IMAGE_WIDTH / image.width)
    return image.resize((IMAGE_WIDTH, image_height))

def save_image(image: Image.Image) -> str:
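    # Save under a random UUID filename as JPEG and return the path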
    image_filename = f"{uuid.uuid4()}.jpeg"
    path = os.path.join(UPLOAD_FOLDER, image_filename)
    image.save(path, "JPEG", quality=95)
    return path

def process_image(image_path: str) -> tuple:
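    # Open, resize, save a copy, and return (saved path, base64-encoded JPEG)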
    image = Image.open(image_path).convert('RGB')
    image = preprocess_image(image)
    saved_path = save_image(image)
    encoded = base64.b64encode(open(saved_path, 'rb').read()).decode('utf-8')
    return saved_path, encoded

def format_message(content, image_data=None):
    # Build OpenAI-compatible multimodal content; image_url must be an object with a "url" field
    if image_data:
        return [
            {"type": "text", "text": content},
            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_data}"}}
        ]
    return content

def upload_images(files, uploaded_images):
    new_uploaded = uploaded_images.copy() if uploaded_images else []
    history = []
    for f in files:
        saved, _ = process_image(f)
        new_uploaded.append(saved)
        # The chatbot uses type="messages", so history entries must be role/content dicts
        history.append({"role": "user", "content": f"تم رفع صورة: {os.path.basename(saved)}"})
    return history, new_uploaded

def chat_with_gpt(user_input, uploaded_images, chat_history):
    chat_history = chat_history or []
    messages = [{"role": "system", "content": "أنت مساعد ذكي تحلل الصور وتجاوب على استفسارات المستخدم بدقة وبأسلوب محترم."}]

    # Include prior turns so the model has the conversation context
    messages.extend(chat_history)

    if uploaded_images:
        for image_path in uploaded_images:
            _, encoded = process_image(image_path)
            # Note: image content requires a vision-capable model
            messages.append({"role": "user", "content": format_message("وصف الصورة:", encoded)})

    messages.append({"role": "user", "content": user_input})

    # Call the model with streaming enabled and accumulate the chunks
    completion = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=messages,
        temperature=0.7,
        max_tokens=1024,
        top_p=1,
        stream=True
    )
    response = ""
    for chunk in completion:
        if chunk.choices[0].delta.content:
            response += chunk.choices[0].delta.content

    chat_history.append({"role": "user", "content": user_input})
    chat_history.append({"role": "assistant", "content": response})
    # Return the same history to both the chatbot display and the state
    return chat_history, chat_history

# Gradio interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 ShfraAI - chatBot")
    chatbot = gr.Chatbot(label="ShfraAI", avatar_images=(None, BOT_AVATAR), type="messages")
    user_input = gr.Textbox(placeholder="اكتب رسالتك...", show_label=False)
    upload_button = gr.UploadButton("رفع صور", file_count="multiple")
    submit_button = gr.Button("إرسال")
    uploaded_state = gr.State([])      # Stores uploaded image paths
    chat_history_state = gr.State([])  # Stores the conversation history

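    # Wire the upload button and both submit paths (button click and Enter key) to the handlers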
    upload_button.upload(
        upload_images,
        inputs=[upload_button, uploaded_state],
        outputs=[chatbot, uploaded_state]
    )

    submit_button.click(
        chat_with_gpt,
        inputs=[user_input, uploaded_state, chat_history_state],
        outputs=[chatbot, chat_history_state]
    )

    user_input.submit(
        chat_with_gpt,
        inputs=[user_input, uploaded_state, chat_history_state],
        outputs=[chatbot, chat_history_state]
    )

if __name__ == "__main__":
    demo.queue().launch(share=True)