<!-- scrape residue from the hosting page: wasmdashai's picture -->
<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>LAHJA AI - Voice Assistant</title>
  <script src="https://cdn.tailwindcss.com"></script>
  <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.4.0/css/all.min.css">
  <style>
    @import url('https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;500;600;700&display=swap');

    body {
      font-family: 'Poppins', sans-serif;
      background: linear-gradient(135deg, #1a1a2e, #16213e);
      color: #fff;
      height: 100vh;
      overflow: hidden;
    }

    /* Frosted-glass card that holds the whole chat UI. */
    .chat-container {
      background: rgba(255, 255, 255, 0.1);
      backdrop-filter: blur(10px);
      border-radius: 20px;
      box-shadow: 0 10px 30px rgba(0, 0, 0, 0.3);
      border: 1px solid rgba(255, 255, 255, 0.1);
    }

    .message-bubble {
      max-width: 80%;
      padding: 12px 18px;
      border-radius: 20px;
      margin-bottom: 12px;
      position: relative;
      animation: fadeIn 0.3s ease-out;
    }

    .user-message {
      background: linear-gradient(135deg, #4e54c8, #8f94fb);
      align-self: flex-end;
      border-bottom-right-radius: 5px;
    }

    .ai-message {
      background: rgba(255, 255, 255, 0.15);
      align-self: flex-start;
      border-bottom-left-radius: 5px;
    }

    .pulse { animation: pulse 1.5s infinite; }

    @keyframes pulse {
      0% { transform: scale(1); }
      50% { transform: scale(1.05); }
      100% { transform: scale(1); }
    }

    @keyframes fadeIn {
      from { opacity: 0; transform: translateY(10px); }
      to { opacity: 1; transform: translateY(0); }
    }

    /* Animated bars shown while the microphone is live. */
    .waveform {
      display: flex;
      align-items: center;
      height: 40px;
      gap: 3px;
    }

    .waveform-bar {
      background: rgba(255, 255, 255, 0.7);
      width: 4px;
      border-radius: 2px;
      animation: equalize 1.5s infinite ease-in-out;
    }

    @keyframes equalize {
      0%, 100% { height: 10px; }
      50% { height: 20px; }
    }

    /* Negative delays stagger the bars so they never move in unison. */
    .waveform-bar:nth-child(1) { animation-delay: -0.9s; }
    .waveform-bar:nth-child(2) { animation-delay: -0.7s; }
    .waveform-bar:nth-child(3) { animation-delay: -0.5s; }
    .waveform-bar:nth-child(4) { animation-delay: -0.3s; }
    .waveform-bar:nth-child(5) { animation-delay: -0.1s; }

    /* Three bouncing dots shown while the AI reply is "typing". */
    .typing-indicator {
      display: flex;
      align-items: center;
      gap: 5px;
    }

    .typing-dot {
      width: 8px;
      height: 8px;
      background: rgba(255, 255, 255, 0.7);
      border-radius: 50%;
      animation: typingAnimation 1.4s infinite ease-in-out;
    }

    .typing-dot:nth-child(1) { animation-delay: 0s; }
    .typing-dot:nth-child(2) { animation-delay: 0.2s; }
    .typing-dot:nth-child(3) { animation-delay: 0.4s; }

    @keyframes typingAnimation {
      0%, 60%, 100% { transform: translateY(0); }
      30% { transform: translateY(-5px); }
    }

    .scrollbar-hide::-webkit-scrollbar { display: none; }
    .scrollbar-hide { -ms-overflow-style: none; scrollbar-width: none; }
  </style>
</head>
<body class="flex items-center justify-center p-4">
  <div class="chat-container w-full max-w-2xl h-[80vh] flex flex-col">
    <!-- Header -->
    <div class="p-4 border-b border-gray-700 flex items-center justify-between">
      <div class="flex items-center gap-3">
        <div class="w-10 h-10 rounded-full bg-gradient-to-r from-purple-500 to-blue-500 flex items-center justify-center">
          <i class="fas fa-robot text-white"></i>
        </div>
        <div>
          <h2 class="font-semibold">Voice Assistant</h2>
          <p class="text-xs text-gray-300">Powered by ChatGPT</p>
        </div>
      </div>
      <div class="flex items-center gap-2">
        <button id="settings-btn" class="p-2 rounded-full hover:bg-gray-700 transition">
          <i class="fas fa-cog text-gray-300"></i>
        </button>
      </div>
    </div>

    <!-- Chat Messages -->
    <div id="chat-messages" class="flex-1 p-4 overflow-y-auto scrollbar-hide flex flex-col">
      <!-- Initial welcome message -->
      <div class="message-bubble ai-message">
        <p>مرحباً! أنا مساعدك الصوتي الذكي. اضغط على زر الميكروفون لبدء المحادثة معي.</p>
      </div>
    </div>

    <!-- Input Area -->
    <div class="p-4 border-t border-gray-700">
      <div class="flex items-center gap-2">
        <button id="voice-btn" class="w-14 h-14 rounded-full bg-gradient-to-r from-purple-600 to-blue-500 flex items-center justify-center text-white hover:from-purple-700 hover:to-blue-600 transition-all shadow-lg pulse">
          <i class="fas fa-microphone text-xl"></i>
        </button>
        <div class="flex-1 bg-gray-700 rounded-full px-4 py-2 flex items-center justify-between">
          <p id="voice-status" class="text-sm text-gray-300">اضغط على الميكروفون للتحدث</p>
          <div id="waveform" class="waveform hidden">
            <div class="waveform-bar"></div>
            <div class="waveform-bar"></div>
            <div class="waveform-bar"></div>
            <div class="waveform-bar"></div>
            <div class="waveform-bar"></div>
          </div>
        </div>
      </div>
    </div>
  </div>

  <!-- Settings Modal -->
  <div id="settings-modal" class="fixed inset-0 bg-black bg-opacity-50 flex items-center justify-center hidden z-50">
    <div class="bg-gray-800 rounded-xl p-6 w-full max-w-md">
      <div class="flex justify-between items-center mb-4">
        <h3 class="text-xl font-semibold">الإعدادات</h3>
        <button id="close-settings" class="text-gray-400 hover:text-white">
          <i class="fas fa-times"></i>
        </button>
      </div>
      <div class="space-y-4">
        <div>
          <label class="block text-sm font-medium mb-1">Voice Model</label>
          <select id="voiceSelect" class="w-full bg-gray-700 border border-gray-600 rounded-lg px-3 py-2 text-white">
            <option value="SA2">Najdi Arabic Haba v2</option>
            <option value="us">American English</option>
            <option value="SA1">Najdi Arabic Haba v1</option>
            <option value="SA3">Najdi Arabic AHmmed v1</option>
          </select>
        </div>
        <div>
          <label class="block text-sm font-medium mb-1">نبرة الصوت</label>
          <select class="w-full bg-gray-700 border border-gray-600 rounded-lg px-3 py-2 text-white">
            <option>ذكر</option>
            <option>أنثى</option>
          </select>
        </div>
        <div>
          <label class="block text-sm font-medium mb-1">سرعة الصوت</label>
          <!-- FIX: gave the slider an id so the script can read it; it was dead UI before. -->
          <input id="rateRange" type="range" min="0.5" max="2" step="0.1" value="1" class="w-full">
        </div>
      </div>
      <div class="mt-6 flex justify-end gap-3">
        <button id="save-settings" class="px-4 py-2 bg-blue-600 rounded-lg hover:bg-blue-700 transition">
          حفظ الإعدادات
        </button>
      </div>
    </div>
  </div>

  <script type="module">
    import { Client } from "https://cdn.jsdelivr.net/npm/@gradio/client/dist/index.min.js";

    // ---- DOM elements -----------------------------------------------------
    const voiceBtn = document.getElementById('voice-btn');
    const voiceStatus = document.getElementById('voice-status');
    const waveform = document.getElementById('waveform');
    const chatMessages = document.getElementById('chat-messages');
    const settingsBtn = document.getElementById('settings-btn');
    const settingsModal = document.getElementById('settings-modal');
    const closeSettings = document.getElementById('close-settings');
    const saveSettings = document.getElementById('save-settings');

    // ---- Speech recognition setup -----------------------------------------
    // FIX: the original called `new SpeechRecognition()` unconditionally; on
    // browsers without the Web Speech API (e.g. Firefox) the constructor is
    // undefined, the whole module threw at load, and every handler below
    // (including the settings modal) stopped working. Guard instead.
    const SpeechRecognitionCtor = window.SpeechRecognition || window.webkitSpeechRecognition;
    const recognition = SpeechRecognitionCtor ? new SpeechRecognitionCtor() : null;
    if (recognition) {
      recognition.lang = 'ar-SA';
      recognition.interimResults = false;
      recognition.maxAlternatives = 1;
    }

    // Browser speech synthesis, used as a fallback when the VITS model fails.
    const synth = window.speechSynthesis;

    // Maps the voice <select> values to Hugging Face VITS model ids.
    const voiceModels = {
      'us': 'wasmdashai/vits-en-v1',
      'SA1': 'wasmdashai/vits-ar-sa-huba-v1',
      'SA2': 'wasmdashai/vits-ar-sa-huba-v2',
      'SA3': 'wasmdashai/vits-ar-sa-A',
    };

    // ---- State ------------------------------------------------------------
    let isListening = false;           // true while the mic button is "armed"
    let conversationHistory = [];      // {role, content} log (not yet sent anywhere)

    // ---- Event listeners --------------------------------------------------
    voiceBtn.addEventListener('click', toggleVoiceRecognition);
    settingsBtn.addEventListener('click', () => settingsModal.classList.remove('hidden'));
    closeSettings.addEventListener('click', () => settingsModal.classList.add('hidden'));
    saveSettings.addEventListener('click', saveSettingsHandler);

    // Reads the speech-rate slider; defaults to 1.0 if absent or unparsable.
    function currentRate() {
      const slider = document.getElementById('rateRange');
      const rate = slider ? parseFloat(slider.value) : NaN;
      return Number.isFinite(rate) ? rate : 1.0;
    }

    // Mic button toggles between listening and idle.
    function toggleVoiceRecognition() {
      if (isListening) {
        stopListening();
      } else {
        startListening();
      }
    }

    // Arms the mic UI and starts recognition.
    function startListening() {
      if (!recognition) {
        // FIX: graceful message instead of a module-load crash on unsupported browsers.
        showSystemMessage('التعرف على الصوت غير مدعوم في هذا المتصفح');
        return;
      }
      isListening = true;
      voiceBtn.classList.add('animate-pulse');
      voiceBtn.innerHTML = '<i class="fas fa-stop text-xl"></i>';
      voiceStatus.textContent = 'أنا أستمع لك...';
      waveform.classList.remove('hidden');
      try {
        recognition.start();
      } catch (e) {
        console.error('Error starting recognition:', e);
        stopListening();
      }
    }

    // Restores the idle mic UI and stops recognition.
    function stopListening() {
      isListening = false;
      voiceBtn.classList.remove('animate-pulse');
      voiceBtn.innerHTML = '<i class="fas fa-microphone text-xl"></i>';
      voiceStatus.textContent = 'اضغط على الميكروفون للتحدث';
      waveform.classList.add('hidden');
      if (!recognition) return;
      try {
        recognition.stop();
      } catch (e) {
        console.error('Error stopping recognition:', e);
      }
    }

    function saveSettingsHandler() {
      // In a real app, you would save these settings to localStorage or a backend.
      settingsModal.classList.add('hidden');
      showSystemMessage('تم حفظ الإعدادات بنجاح');
    }

    // Small centered status line (errors, confirmations).
    function showSystemMessage(text) {
      const messageDiv = document.createElement('div');
      messageDiv.className = 'text-center text-xs text-gray-400 my-2';
      messageDiv.textContent = text;
      chatMessages.appendChild(messageDiv);
      chatMessages.scrollTop = chatMessages.scrollHeight;
    }

    // Appends a user bubble.
    // FIX: build the <p> via textContent instead of innerHTML interpolation so
    // a transcript containing markup cannot inject HTML into the page.
    function addUserMessage(text) {
      const messageDiv = document.createElement('div');
      messageDiv.className = 'message-bubble user-message';
      const p = document.createElement('p');
      p.textContent = text;
      messageDiv.appendChild(p);
      chatMessages.appendChild(messageDiv);
      chatMessages.scrollTop = chatMessages.scrollHeight;
    }

    // Appends an AI bubble: shows a typing indicator, then swaps in the text
    // and speaks it aloud. Same textContent fix as addUserMessage.
    function addAiMessage(text) {
      const messageDiv = document.createElement('div');
      messageDiv.className = 'message-bubble ai-message';

      const typingDiv = document.createElement('div');
      typingDiv.className = 'typing-indicator';
      typingDiv.innerHTML = `
        <div class="typing-dot"></div>
        <div class="typing-dot"></div>
        <div class="typing-dot"></div>
      `;
      messageDiv.appendChild(typingDiv);
      chatMessages.appendChild(messageDiv);
      chatMessages.scrollTop = chatMessages.scrollHeight;

      // Simulate AI thinking delay.
      setTimeout(() => {
        typingDiv.remove();
        const p = document.createElement('p');
        p.textContent = text;
        messageDiv.replaceChildren(p);
        speakResponse(text);
      }, 1500);
    }

    // Sends the reply text to the hosted VITS model and plays the returned
    // audio; falls back to the browser's own TTS on any failure.
    async function speakResponse(text) {
      const voiceSelect = document.getElementById('voiceSelect');
      const voice = voiceSelect.value;
      try {
        const client = await Client.connect("wasmdashai/DemoLahja");
        const result = await client.predict("/predict", {
          text: text,
          name_model: voiceModels[voice],
          // FIX: honour the speed slider instead of a hard-coded 1.0.
          speaking_rate: currentRate()
        });
        const audioUrl = result.data?.[0]?.url;
        if (audioUrl) {
          const audio = new Audio(audioUrl);
          audio.play();
        } else {
          fallbackTTS(text);
        }
      } catch (err) {
        console.error("VITS model error:", err);
        fallbackTTS(text);
      }
    }

    // Browser SpeechSynthesis fallback.
    function fallbackTTS(text) {
      if (synth.speaking) {
        synth.cancel();
      }
      const utterance = new SpeechSynthesisUtterance(text);
      // FIX: the original read `voiceSelect` as an implicit id-named global
      // (never declared in script scope); look it up explicitly.
      const voiceSelect = document.getElementById('voiceSelect');
      utterance.lang = voiceSelect.value === 'us' ? 'en-US' : 'ar-SA';
      utterance.rate = currentRate();
      synth.speak(utterance);
    }

    // Records the transcript and produces a (simulated) reply.
    function processUserInput(text) {
      addUserMessage(text);
      conversationHistory.push({ role: 'user', content: text });
      // In a real app, you would send this to your backend which connects to
      // the ChatGPT API. For this demo, we simulate a response.
      simulateChatGPTResponse(text);
    }

    // Canned keyword → reply lookup standing in for a real ChatGPT call.
    function simulateChatGPTResponse(userInput) {
      const responses = {
        "مرحبا": "مرحباً بك! كيف يمكنني مساعدتك اليوم؟",
        "السلام عليكم": "وعليكم السلام ورحمة الله وبركاته 🌹 كيف حالك؟",
        "كيف حالك": "أنا بخير، شكراً لسؤالك! كيف يمكنني مساعدتك؟",
        "ما هو اسمك": "أنا مساعدك الذكي الذي يعمل بالذكاء الاصطناعي. يمكنك تسميتي كما تحب!",
        "من صممك": "تم تطويري باستخدام تقنيات الذكاء الاصطناعي وخوارزميات التعلم الآلي.",
        "ماذا تستطيع ان تفعل": "أستطيع مساعدتك بالإجابة على أسئلة عامة، تقديم معلومات، ومساعدتك في بعض المهام اليومية.",
        "شكرا": "على الرحب والسعة! 🌸 هل هناك أي شيء آخر تحتاج مساعدتي فيه؟",
        "وداعا": "إلى اللقاء! 👋 لا تتردد في العودة إذا كنت بحاجة إلى أي مساعدة.",
        "ما هو تاريخ اليوم": `اليوم هو: ${new Date().toLocaleDateString("ar-EG")}`,
        "ما هو الوقت الان": `الوقت الآن هو: ${new Date().toLocaleTimeString("ar-EG")}`,
        "من اين انت": "أنا موجود في كل مكان 🌍 لأنني مساعد افتراضي على الإنترنت.",
        "ماهي عاصمه السعوديه": "عاصمة المملكة العربية السعودية هي الرياض.",
        "ماهي عاصمه اليمن": "عاصمة اليمن هي صنعاء.",
        "اعطني نصيحه": "✨ لا تؤجل عمل اليوم إلى الغد، وابدأ بخطوات صغيرة نحو هدفك."
      };
      const defaultResponse = "أنا آسف، لم أفهم سؤالك بالكامل. هل يمكنك توضيح ذلك؟";
      const response = responses[userInput.toLowerCase()] || defaultResponse;
      setTimeout(() => {
        addAiMessage(response);
        conversationHistory.push({ role: 'assistant', content: response });
      }, 2000);
    }

    // ---- Recognition event handlers (only when the API exists) -----------
    if (recognition) {
      recognition.onresult = (event) => {
        const speechResult = event.results[0][0].transcript;
        processUserInput(speechResult);
        stopListening();
      };

      recognition.onerror = (event) => {
        console.error('Speech recognition error', event.error);
        stopListening();
        if (event.error === 'not-allowed') {
          showSystemMessage('يجب السماح باستخدام الميكروفون لتفعيل هذه الميزة');
        } else {
          showSystemMessage('حدث خطأ في التعرف على الصوت. يرجى المحاولة مرة أخرى');
        }
      };

      recognition.onend = () => {
        if (isListening) {
          // Recognition stops by itself after silence; restart while armed.
          setTimeout(() => {
            try {
              recognition.start();
            } catch (e) {
              console.error('Error restarting recognition:', e);
              stopListening();
            }
          }, 500);
        }
      };
    }

    // Initialize voices when they become available.
    if (speechSynthesis.onvoiceschanged !== undefined) {
      speechSynthesis.onvoiceschanged = () => {
        // Voices are now loaded.
      };
    }
  </script>
  <p style="border-radius: 8px; text-align: center; font-size: 12px; color: #fff; margin-top: 16px;position: fixed; left: 8px; bottom: 8px; z-index: 10; background: rgba(0, 0, 0, 0.8); padding: 4px 8px;">Made with <img src="https://enzostvs-deepsite.hf.space/logo.svg" alt="DeepSite Logo" style="width: 16px; height: 16px; vertical-align: middle;display:inline-block;margin-right:3px;filter:brightness(0) invert(1);"><a href="https://enzostvs-deepsite.hf.space" style="color: #fff;text-decoration: underline;" target="_blank" >DeepSite</a> - 🧬 <a href="https://enzostvs-deepsite.hf.space?remix=wasmdashai/wasmdashai-chatlhja" style="color: #fff;text-decoration: underline;" target="_blank" >Remix</a></p>
</body>
</html>
<!-- scrape residue from the hosting page: commit b59faa2 verified -->