import base64
import os
import time
import uuid

import gradio as gr
from groq import Groq
from PIL import Image

# Read the API key from the environment instead of hard-coding a secret in the source.
API_KEY = os.environ.get("GROQ_API_KEY", "")
client = Groq(api_key=API_KEY)

# Folder and avatar setup
UPLOAD_FOLDER = "./uploaded_images"
BOT_AVATAR = "https://shfra.netlify.app/imeg.png"
os.makedirs(UPLOAD_FOLDER, exist_ok=True)

# Image settings
IMAGE_WIDTH = 1111


def preprocess_image(image: Image.Image) -> Image.Image:
    """Resize the image to IMAGE_WIDTH while preserving its aspect ratio."""
    image_height = int(image.height * IMAGE_WIDTH / image.width)
    return image.resize((IMAGE_WIDTH, image_height))


def save_image(image: Image.Image) -> str:
    """Save the image under a unique filename and return its path."""
    image_filename = f"{uuid.uuid4()}.jpeg"
    path = os.path.join(UPLOAD_FOLDER, image_filename)
    image.save(path, "JPEG", quality=95)
    return path


def process_image(image_path: str) -> tuple:
    """Normalize, save, and base64-encode an image; return (saved_path, encoded)."""
    image = Image.open(image_path).convert("RGB")
    image = preprocess_image(image)
    saved_path = save_image(image)
    with open(saved_path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("utf-8")
    return saved_path, encoded


def format_message(content, image_data=None):
    """Build message content; image parts use the OpenAI-style {"url": ...} wrapper."""
    if image_data:
        return [
            {"type": "text", "text": content},
            {
                "type": "image_url",
                "image_url": {"url": f"data:image/jpeg;base64,{image_data}"},
            },
        ]
    return content


def upload_images(files, uploaded_images, chat_history):
    """Save uploaded files, remember their paths, and note the upload in the chat."""
    new_uploaded = list(uploaded_images) if uploaded_images else []
    chat_history = list(chat_history) if chat_history else []
    for f in files:
        saved, _ = process_image(f)
        new_uploaded.append(saved)
        # The Chatbot uses type="messages", so entries must be role/content dicts.
        chat_history.append({"role": "user", "content": "تم رفع صورة."})
    return chat_history, new_uploaded, chat_history


def chat_with_gpt(user_input, uploaded_images, chat_history):
    chat_history = chat_history or []
    messages = [{
        "role": "system",
        "content": "أنت مساعد ذكي تحلل الصور وتجاوب على استفسارات المستخدم بدقة وبأسلوب محترم.",
    }]
    if uploaded_images:
        for image_path in uploaded_images:
            _, encoded = process_image(image_path)
            messages.append({"role": "user", "content": format_message("وصف الصورة:", encoded)})
    messages.append({"role": "user", "content": user_input})

    # Call the model. Note: llama-3.3-70b-versatile is text-only; a vision-capable
    # Groq model is needed if image messages are included.
    completion = client.chat.completions.create(
        model="llama-3.3-70b-versatile",
        messages=messages,
        temperature=0.7,
        max_tokens=1024,
        top_p=1,
        stream=True,
    )

    # Accumulate the streamed chunks; the full reply is returned at once.
    response = ""
    for chunk in completion:
        if chunk.choices[0].delta.content:
            response += chunk.choices[0].delta.content
            time.sleep(0.01)

    chat_history.append({"role": "user", "content": user_input})
    chat_history.append({"role": "assistant", "content": response})
    # Return the history twice: once for the Chatbot display, once for the state.
    return chat_history, chat_history


# Gradio interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 ShfraAI - chatBot")
    chatbot = gr.Chatbot(label="ShfraAI", avatar_images=(None, BOT_AVATAR), type="messages")
    user_input = gr.Textbox(placeholder="اكتب رسالتك...", show_label=False)
    upload_button = gr.UploadButton("رفع صور", file_count="multiple")
    submit_button = gr.Button("إرسال")
    uploaded_state = gr.State([])      # stores paths of uploaded images
    chat_history_state = gr.State([])  # stores the conversation

    upload_button.upload(
        upload_images,
        inputs=[upload_button, uploaded_state, chat_history_state],
        outputs=[chatbot, uploaded_state, chat_history_state],
    )
    submit_button.click(
        chat_with_gpt,
        inputs=[user_input, uploaded_state, chat_history_state],
        outputs=[chatbot, chat_history_state],
    )
    user_input.submit(
        chat_with_gpt,
        inputs=[user_input, uploaded_state, chat_history_state],
        outputs=[chatbot, chat_history_state],
    )

if __name__ == "__main__":
    demo.queue().launch(share=True)