Update index.html

index.html (CHANGED): +121 -83
@@ -198,7 +198,7 @@
 const sendButton = document.getElementById('sendButton');
 
 // --- API Endpoint ---
-const API_ENDPOINT_URL = "https://k2labsym7o48yj4r.us-east-1.aws.endpoints.huggingface.cloud";
+const API_ENDPOINT_URL = "https://k2labsym7o48yj4r.us-east-1.aws.endpoints.huggingface.cloud/v1/chat/completions";
 
 // --- State Variables ---
 let recognition;
@@ -316,7 +316,7 @@
 const textToSpeak = ttsQueue.shift();
 
 setTimeout(() => {
-synth.cancel();
+synth.cancel(); // Clear any previous utterances
 const utterance = new SpeechSynthesisUtterance(textToSpeak);
 utterance.lang = 'en-US';
 utterance.rate = 1.2;
@@ -373,7 +373,7 @@
 currentAssistantMessageElement = null;
 sentenceBuffer = "";
 spokenTextPointer = 0;
-ttsQueue = [];
+ttsQueue = []; // Clear TTS queue for new response
 recognitionWasRunning = false;
 
 if (isListening && recognition) {
@@ -383,23 +383,27 @@
 console.log("Stopped recognition temporarily for API call.");
 } catch(e) { console.warn("Could not stop recognition before API call:", e); }
 }
-if (synth && synth.speaking) {
+if (synth && synth.speaking) { // Cancel any ongoing speech
 synth.cancel();
 isSpeaking = false;
 }
 
 const requestBody = {
+model: "mradermacher/XortronCriminalComputingConfig-GGUF", // Specify your model
 messages: conversationHistory,
 max_tokens: 750,
 stream: true
+// Add other OpenAI-compatible parameters if needed, e.g., temperature: 0.7
 };
 const requestHeaders = {
 'Content-Type': 'application/json',
 'Accept': 'text/event-stream'
+// No 'Authorization' header for public endpoints, add if required by your specific setup
+// 'Authorization': 'Bearer YOUR_HF_TOKEN_OR_DUMMY_IF_NEEDED'
 };
 
 try {
-console.log("Sending request to:", apiEndpoint);
+console.log("Sending request to:", apiEndpoint, "with body:", JSON.stringify(requestBody).substring(0, 200) + "...");
 const response = await fetch(apiEndpoint, { method: 'POST', headers: requestHeaders, body: JSON.stringify(requestBody) });
 
 if (!response.ok) {
@@ -408,6 +412,10 @@
 try {
 const errorJson = JSON.parse(errorText);
 detail = errorJson.detail || errorJson.error?.message || errorJson.message || JSON.stringify(errorJson);
+// If the error is about a missing API key for OpenAI, it might come from the HF endpoint expecting one
+if (detail.toLowerCase().includes("api key") && detail.toLowerCase().includes("openai")) {
+detail += " (This error might be from the Hugging Face endpoint if it's configured to require an API key, even if it's mimicking OpenAI.)";
+}
 } catch (parseError) {}
 throw new Error(`API Error: ${response.status} ${response.statusText} - ${detail}`);
 }
@@ -428,15 +436,20 @@
 isDoneProcessingStream = true;
 if (partialChunk.trim()) {
 console.warn("Stream ended by reader 'done' with unprocessed partial chunk:", partialChunk);
+// Process any remaining partial chunk if it seems valid
+// This part might need careful handling depending on how your stream ends
 }
 break;
 }
 
 const chunkText = partialChunk + decoder.decode(value, { stream: true });
+// OpenAI-compatible streams usually send events like "data: {...}\n\n"
+// And a final "data: [DONE]\n\n"
 const eventStrings = chunkText.split("\n\n");
 
+
 if (!chunkText.endsWith("\n\n") && eventStrings.length > 0) {
-partialChunk = eventStrings.pop();
+partialChunk = eventStrings.pop(); // Keep the incomplete event for the next chunk
 } else {
 partialChunk = "";
 }
@@ -444,34 +457,27 @@
 for (const eventString of eventStrings) {
 if (!eventString.trim()) continue;
 
-
-
-
-
-for (const line of lines) {
-if (line.startsWith("data:")) {
-const dataJson = line.substring(5).trim();
-if (dataJson === "[DONE]") {
-console.log("Received [DONE] signal in stream.");
-isDoneSignalFound = true;
-isDoneProcessingStream = true;
-break;
-}
-try {
-const data = JSON.parse(dataJson);
-if (data.choices && data.choices[0]?.delta?.content) {
-content += data.choices[0].delta.content;
-}
-} catch (e) {
-console.error("Error parsing stream data JSON:", e, "Data:", dataJson);
-}
-}
+if (eventString.startsWith("data: [DONE]")) {
+console.log("Received [DONE] signal in stream.");
+isDoneProcessingStream = true;
+break;
 }
 
-if (
-
-
-
+if (eventString.startsWith("data:")) {
+const dataJson = eventString.substring(5).trim();
+try {
+const data = JSON.parse(dataJson);
+if (data.choices && data.choices[0]?.delta?.content) {
+processStreamContent(data.choices[0].delta.content);
+} else if (data.choices && data.choices[0]?.finish_reason) {
+console.log("Stream finished with reason:", data.choices[0].finish_reason);
+// isDoneProcessingStream = true; // The [DONE] signal is more reliable
+}
+} catch (e) {
+console.error("Error parsing stream data JSON:", e, "Data:", dataJson);
+}
+} else {
+console.warn("Received non-data event in stream (or malformed data):", eventString);
 }
 }
 }
@@ -497,7 +503,7 @@
 console.log("Rendered final sanitized HTML for assistant message.");
 } catch (e) {
 console.error("Error processing final Markdown/HTML:", e);
-currentAssistantMessageElement.textContent = sentenceBuffer;
+currentAssistantMessageElement.textContent = sentenceBuffer; // Fallback to text
 }
 }
 }
@@ -505,10 +511,12 @@
 if (sentenceBuffer) {
 conversationHistory.push({ role: "assistant", content: sentenceBuffer });
 } else {
-console.log("API call successful but no content received. Removing last user message from history.");
-
-
-
+console.log("API call successful but no content received. Removing last user message from history if appropriate.");
+// Check if last user message should be popped if API returned empty consistently
+// For now, let's assume empty content is possible and valid.
+// if (conversationHistory.length > 0 && conversationHistory[conversationHistory.length - 1].role === 'user') {
+// conversationHistory.pop();
+// }
 }
 
 } catch (error) {
@@ -517,13 +525,21 @@
 
 let userFriendlyError = `Sorry, I encountered an error: ${error.message}`;
 if (error instanceof TypeError && error.message.toLowerCase().includes('fetch')) {
-userFriendlyError = `Connection Error: Could not connect to the API
+userFriendlyError = `Connection Error: Could not connect to the API. Please check the URL and network connection.`;
 statusDiv.textContent = 'Connection Error';
-} else {
-
+} else if (response && response.status === 401) {
+userFriendlyError = `Authentication Error: The API endpoint requires authentication. Please check if an API key/token is needed.`;
+statusDiv.textContent = 'Auth Error';
+} else if (response && response.status === 422) {
+userFriendlyError = `Input Error: The data sent to the API was invalid. (${error.message})`;
+statusDiv.textContent = 'Invalid Input';
+}
+else {
+statusDiv.textContent = `API Error`;
 }
 addMessageToChatbox('assistant', userFriendlyError);
 
+// Clean up conversation history if the user's message led to an error and no assistant response was formed
 if (conversationHistory.length > 0 && conversationHistory[conversationHistory.length - 1].role === 'user') {
 conversationHistory.pop();
 }
@@ -532,16 +548,18 @@
 console.log("API processing finished or errored. Entering finally block.");
 isApiProcessing = false;
 
-
+// Wait for TTS to finish before enabling inputs
+const checkTTSDone = setInterval(() => {
 if (ttsQueue.length === 0 && !isSpeaking) {
+clearInterval(checkTTSDone);
 console.log("Finally: TTS idle. Enabling inputs and checking recognition restart.");
 enableInputs();
 statusDiv.textContent = isListening ? 'Listening...' : '';
 restartRecognitionIfNeeded(recognitionWasRunning);
 } else {
-
+console.log("Finally: TTS queue active or speaking. Waiting for TTS to complete...");
 }
-},
+}, 200);
 }
 }
 
@@ -550,12 +568,15 @@
 currentAssistantMessageElement = addMessageToChatbox('assistant', '', true);
 }
 sentenceBuffer += content;
+// Update text content for live streaming display (Markdown rendering happens at the end)
 currentAssistantMessageElement.textContent = sentenceBuffer;
 chatbox.scrollTop = chatbox.scrollHeight;
 
+// Sentence-based TTS
 let searchStart = spokenTextPointer;
 while (searchStart < sentenceBuffer.length) {
-
+// Regex to find sentence endings more robustly, including after quotes or newlines
+const sentenceEndMatch = sentenceBuffer.substring(searchStart).match(/([.?!])(\s|[\n\r]|$|["'])/);
 if (sentenceEndMatch) {
 const sentenceEndIndex = searchStart + sentenceEndMatch.index + sentenceEndMatch[1].length;
 const textToSpeak = sentenceBuffer.substring(spokenTextPointer, sentenceEndIndex).trim();
@@ -564,9 +585,9 @@
 speakText(textToSpeak);
 spokenTextPointer = sentenceEndIndex;
 }
-searchStart = spokenTextPointer;
+searchStart = spokenTextPointer; // Continue search from new pointer
 } else {
-break;
+break; // No more complete sentences found yet
 }
 }
 }
@@ -599,6 +620,7 @@
 function addMessageToChatbox(role, text, isStreaming = false) {
 const messageDiv = document.createElement('div');
 messageDiv.classList.add('chat-bubble');
+// For Markdown, set initial text content. Final HTML rendering happens after stream or for non-streamed.
 messageDiv.textContent = text;
 messageDiv.classList.add(role === 'user' ? 'user-bubble' : 'assistant-bubble');
 if (role === 'assistant' && isStreaming) {
@@ -626,7 +648,7 @@
 console.log("Disabling inputs.");
 textInput.disabled = true;
 sendButton.disabled = true;
-if (recognition) {
+if (recognition) { // Only disable recordButton if recognition is supported
 recordButton.disabled = true;
 recordButton.classList.add('opacity-50');
 }
@@ -636,20 +658,22 @@
 console.log("Enabling inputs.");
 textInput.disabled = false;
 sendButton.disabled = textInput.value.trim() === '' || isApiProcessing;
-
+if (recognition) { // Only enable recordButton if recognition is supported
 recordButton.disabled = false;
 recordButton.classList.remove('opacity-50');
 }
 }
 
-function stopListening(forceStop = false) {
-if (!recognition) return;
+function stopListening(forceStop = false) { // Added forceStop parameter, though not explicitly used differently yet
+if (!recognition) return; // Ensure recognition exists
 const wasListening = isListening;
-isListening = false;
-
+isListening = false; // Set state immediately
+
+if (wasListening) { // Only act if it was actually listening
 console.log("Stopping listening session.");
-clearTimeout(restartTimer);
+clearTimeout(restartTimer); // Clear any pending restart
 updateButtonUI(false);
+
 if (!isApiProcessing && !isSpeaking && ttsQueue.length === 0) {
 statusDiv.textContent = 'Stopping...';
 setTimeout(() => {
@@ -657,32 +681,40 @@
 }, 500);
 }
 try {
-recognition.abort();
+recognition.abort(); // Abort ongoing recognition
 console.log("Recognition aborted.");
 } catch (e) {
 console.warn("Error aborting recognition (might have already stopped):", e);
 }
 }
-
+
+if (synth) { // Ensure synth exists
 console.log("Cancelling any TTS on stopListening.");
-synth.cancel();
-ttsQueue = [];
-isSpeaking = false;
+synth.cancel(); // Stop any speech
+ttsQueue = []; // Clear the queue
+isSpeaking = false; // Reset speaking state
 }
-
+
+if (!isApiProcessing) { // Only enable inputs if API is not busy
 enableInputs();
-if (!isSpeaking && ttsQueue.length === 0) {
+if (!isSpeaking && ttsQueue.length === 0) { // If TTS also idle, clear status
 statusDiv.textContent = '';
 }
 }
 }
 
 function startListening() {
-if (!recognition || isListening) return;
+if (!recognition || isListening) return; // Ensure recognition exists and not already listening
+
+// Check for microphone permission first
 navigator.mediaDevices.getUserMedia({ audio: true })
 .then(stream => {
+// We got permission. We don't need to use the stream object itself here,
+// as Web Speech API handles microphone access internally.
+// It's good practice to stop the tracks immediately if you only needed the permission check.
 stream.getTracks().forEach(track => track.stop());
 console.log("Microphone permission granted or already available.");
+
 isListening = true;
 updateButtonUI(true);
 statusDiv.textContent = 'Starting...';
@@ -691,7 +723,7 @@
 } catch (e) {
 console.error("Error starting recognition:", e);
 statusDiv.textContent = "Error starting listening.";
-isListening = false;
+isListening = false; // Reset state
 updateButtonUI(false);
 }
 })
@@ -699,55 +731,60 @@
 console.error("Microphone access error:", err);
 if (err.name === 'NotAllowedError' || err.name === 'PermissionDeniedError') {
 statusDiv.textContent = 'Microphone access denied.';
-addMessageToChatbox('assistant', 'Error: Microphone access is required for voice input.');
+addMessageToChatbox('assistant', 'Error: Microphone access is required for voice input. Please allow microphone access in your browser settings.');
 } else {
 statusDiv.textContent = `Mic Error: ${err.name}`;
 addMessageToChatbox('assistant', `Error accessing microphone: ${err.message}`);
 }
-isListening = false;
+isListening = false; // Reset state
 updateButtonUI(false);
 });
 }
 
-
-
-
-
-
-
-
-
+// --- Event Listeners & Initialization ---
+if (recordButton && recognition) { // Ensure button exists before adding listener
+recordButton.addEventListener('click', () => {
+if (!isListening) {
+startListening();
+} else {
+stopListening();
+}
+});
+}
+
 
 sendButton.addEventListener('click', () => {
 const text = textInput.value.trim();
 if (text && !isApiProcessing) {
 handleUserInput(text);
 textInput.value = '';
-sendButton.disabled = true;
+sendButton.disabled = true; // Disable after sending
 }
 });
 
 textInput.addEventListener('keypress', (e) => {
-if (e.key === 'Enter' && !e.shiftKey) {
-e.preventDefault();
+if (e.key === 'Enter' && !e.shiftKey) { // Send on Enter, but not Shift+Enter
+e.preventDefault(); // Prevent default newline in input
 const text = textInput.value.trim();
-if (text && !sendButton.disabled) {
+if (text && !sendButton.disabled) { // Check if sendButton is enabled (not processing)
 handleUserInput(text);
 textInput.value = '';
-sendButton.disabled = true;
+sendButton.disabled = true; // Disable after sending
 }
 }
 });
 
 textInput.addEventListener('input', () => {
+// Enable send button only if there's text and not processing
 sendButton.disabled = textInput.value.trim() === '' || isApiProcessing;
 });
 
-
+// Initial Setup
+chatbox.innerHTML = ''; // Clear chatbox on load
 addMessageToChatbox('assistant', 'Hello! Use the microphone or type a message below.');
-console.log("Voice/Text Chat App Initialized (Markdown Enabled)");
-updateButtonUI(false);
-enableInputs();
+console.log("Voice/Text Chat App Initialized (OpenAI-compatible Streaming & Markdown Enabled)");
+updateButtonUI(false); // Set initial button state
+enableInputs(); // Enable inputs initially
 
 </script>
 
@@ -795,15 +832,16 @@
 }
 }
 
-let matrixInterval = setInterval(drawMatrix, 40);
+let matrixInterval = setInterval(drawMatrix, 40); // Adjusted for potentially smoother animation
 
 window.addEventListener('resize', () => {
 const oldWidth = matrixCanvas.width;
 const oldHeight = matrixCanvas.height;
-
+
 matrixCanvas.width = window.innerWidth;
 matrixCanvas.height = window.innerHeight;
 
+// Re-initialize drops only if dimensions actually changed to avoid unnecessary resets
 if (matrixCanvas.width !== oldWidth || matrixCanvas.height !== oldHeight) {
 initializeMatrixDrops();
 }
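The core of this change is that the page now targets the endpoint's OpenAI-compatible /v1/chat/completions route and parses its server-sent events directly. The sketch below is illustrative rather than copied from the commit: it assumes the endpoint emits "data: {...}" events separated by blank lines and a final "data: [DONE]" sentinel, which is what the updated parser expects. The function name streamChatCompletion and the onDelta callback are placeholders, not identifiers from index.html.

// Illustrative sketch of consuming an OpenAI-compatible chat-completions stream.
async function streamChatCompletion(apiUrl, messages, onDelta) {
  // Request shape mirrors the updated page: JSON body with stream: true.
  const response = await fetch(apiUrl, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'Accept': 'text/event-stream' },
    body: JSON.stringify({ messages, max_tokens: 750, stream: true })
  });
  if (!response.ok) throw new Error(`API Error: ${response.status} ${response.statusText}`);

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let partial = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    // Events arrive as "data: {json}" blocks separated by blank lines;
    // keep any incomplete trailing block for the next read.
    partial += decoder.decode(value, { stream: true });
    const events = partial.split('\n\n');
    partial = partial.endsWith('\n\n') ? '' : events.pop();
    for (const event of events) {
      const line = event.trim();
      if (!line.startsWith('data:')) continue;
      const payload = line.slice(5).trim();
      if (payload === '[DONE]') return;                      // end-of-stream sentinel
      const delta = JSON.parse(payload).choices?.[0]?.delta?.content;
      if (delta) onDelta(delta);                              // hand each fragment to the UI
    }
  }
}

Called as streamChatCompletion(API_ENDPOINT_URL, conversationHistory, processStreamContent), this does essentially what the inlined loop in the diff does, just factored into a standalone function for clarity.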
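The sentence-boundary regex added to the streaming handler is what lets speech synthesis start before the full reply has arrived. The following is a small illustrative sketch of that segmentation using the same pattern; extractSpeakableSentences is a hypothetical helper for demonstration, not a function in the commit.

// Illustrative sketch: carve streamed text into speakable sentences.
function extractSpeakableSentences(buffer, spokenPointer) {
  const sentences = [];
  let searchStart = spokenPointer;
  while (searchStart < buffer.length) {
    // Same pattern as the diff: a terminator followed by whitespace, end of text, or a quote.
    const match = buffer.substring(searchStart).match(/([.?!])(\s|[\n\r]|$|["'])/);
    if (!match) break;                                        // no complete sentence yet
    const end = searchStart + match.index + match[1].length;
    const sentence = buffer.substring(spokenPointer, end).trim();
    if (sentence) sentences.push(sentence);
    spokenPointer = end;
    searchStart = spokenPointer;
  }
  return { sentences, spokenPointer };
}

// e.g. extractSpeakableSentences("Hello there. How are", 0)
//      -> { sentences: ["Hello there."], spokenPointer: 12 }
// The trailing "How are" stays in the buffer until a later chunk completes it.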