afouda committed
Commit 1f0b3d6 · verified · 1 Parent(s): 6a507d0

Create app.py

Files changed (1):
  1. app.py +687 -0

app.py ADDED
@@ -0,0 +1,687 @@
+ from Live_audio import GeminiHandler
+
+ # --- Standard library ---
+ import asyncio
+ import base64
+ import collections
+ import io
+ import json
+ import os
+ import pathlib
+ import re
+ import time
+ from datetime import datetime
+ from io import BytesIO
+ from typing import AsyncGenerator, Literal
+
+ # --- Third-party ---
+ # Some of these are kept from the original file even though app.py itself
+ # does not reference them directly (they may be used by companion modules).
+ import fastrtc
+ import gradio as gr
+ import numpy as np
+ import webrtcvad
+ import websockets
+ from dotenv import load_dotenv
+ from fastapi import FastAPI
+ from fastapi.responses import HTMLResponse
+ from fastrtc import (
+     AsyncAudioVideoStreamHandler,
+     AsyncStreamHandler,
+     Stream,
+     WebRTC,
+     get_cloudflare_turn_credentials_async,
+     wait_for_item,
+ )
+ # google.generativeai provides the `genai` binding used by the transcription
+ # helper below (configure / upload_file / GenerativeModel).
+ import google.generativeai as genai
+ from google.genai.types import (
+     LiveConnectConfig,
+     SpeechConfig,
+     VoiceConfig,
+     PrebuiltVoiceConfig,
+     Content,
+     Part,
+ )
+ from gradio.utils import get_space
+ from langdetect import detect
+ from openai import OpenAI
+ from PIL import Image
+ from pydantic import BaseModel
+ from pydub import AudioSegment
+
+ # --- Local modules ---
+ import Old_Document
+ import RAG_Domain_know_doc
+ import User_Specific_Documents
+ from RAG import rag_autism
+ from web_search import search_autism
+ from prompt_template import (
+     Prompt_template_translation,
+     Prompt_template_LLM_Generation,
+     Prompt_template_Reranker,
+     Prompt_template_Wisal,
+     Prompt_template_Halluciations,
+     Prompt_template_paraphrasing,
+     Prompt_template_Translate_to_original,
+     Prompt_template_relevance,
+     Prompt_template_User_document_prompt,
+ )
+ from query_utils import process_query_for_rewrite, get_non_autism_response
+
+ load_dotenv()
+
+ # API keys and constants: read from the environment (.env) instead of
+ # hardcoding secrets in the repository.
+ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
+ TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
+ WEAVIATE_URL = os.getenv("WEAVIATE_URL")
+ WEAVIATE_API_KEY = os.getenv("WEAVIATE_API_KEY")
+ DEEPINFRA_API_KEY = os.getenv("DEEPINFRA_API_KEY")
+ DEEPINFRA_BASE_URL = os.getenv("DEEPINFRA_BASE_URL", "https://api.deepinfra.com/v1/openai")
+
+ env = os.getenv("ENVIRONMENT", "production")
+ # DeepInfra exposes an OpenAI-compatible endpoint, so the OpenAI client works against it.
+ openai = OpenAI(
+     api_key=DEEPINFRA_API_KEY,
+     base_url=DEEPINFRA_BASE_URL,
+ )
+ SESSION_ID = "default"
+
+ # Pending "did you mean ...?" rewrites, keyed by session id.
+ pending_clarifications = {}
+
+ def call_llm(model: str, messages: list[dict], temperature: float = 0.0, **kwargs) -> str:
+     resp = openai.chat.completions.create(
+         model=model,
+         messages=messages,
+         temperature=temperature,
+         **kwargs,
+     )
+     return resp.choices[0].message.content.strip()
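+
+ # Illustrative only: a one-off call through the DeepInfra endpoint.
+ # call_llm("Qwen/Qwen3-32B", [{"role": "user", "content": "What is ASD?"}])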
+
+ def is_greeting(text: str) -> bool:
+     # Word-boundary match for common greetings, case-insensitive.
+     return bool(re.search(r"\b(hi|hello|hey|good (morning|afternoon|evening))\b", text, re.I))
+
+
+ def process_query(query: str, first_turn: bool = False, session_id: str = "default"):
+     intro = ""
+     process_log = []
+
+     # Check whether the user is answering a pending "did you mean ...?" prompt.
+     if session_id in pending_clarifications:
+         if query.strip().lower() == "yes":
+             corrected_query = pending_clarifications.pop(session_id)
+             process_log.append(f"User confirmed: {corrected_query}")
+             return process_autism_pipeline(corrected_query, corrected_query, process_log, intro)
+         else:
+             pending_clarifications.pop(session_id)
+             redirect = (
+                 "Hello, I'm Wisal, an AI assistant developed by Compumacy AI and a knowledgeable autism specialist.\n"
+                 "If you have any question related to autism, please submit a question specifically about autism."
+             )
+             process_log.append("User rejected clarification.")
+             _save_process_log(process_log)
+             return redirect
+
+     if first_turn and (not query or query.strip() == ""):
+         intro = "Hello! I'm Wisal, an AI assistant developed by Compumacy AI, specializing in Autism Spectrum Disorders. How can I help you today?"
+         process_log.append(intro)
+         _save_process_log(process_log)
+         return intro
+
+     if is_greeting(query):
+         greeting = intro + "Hello! I'm Wisal, your AI assistant developed by Compumacy AI. How can I help you today?"
+         process_log.append(f"Greeting detected.\n{greeting}")
+         _save_process_log(process_log)
+         return greeting
+
+     # Correct the query and classify its relevance (3-tuple return).
+     corrected_query, is_autism_related, rewritten_query = process_query_for_rewrite(query)
+     process_log.append(f"Original Query: {query}")
+     process_log.append(f"Corrected Query: {corrected_query}")
+     process_log.append(f"Relevance Check: {'RELATED' if is_autism_related else 'NOT RELATED'}")
+
+     if rewritten_query:
+         process_log.append(f"Rewritten Query: {rewritten_query}")
+
+     # If the query is not autism-related, ask the user to confirm the rewritten question.
+     if not is_autism_related:
+         clarification = f'Your query was not clearly related to autism. Do you mean: "{rewritten_query}"?'
+         pending_clarifications[session_id] = rewritten_query
+         process_log.append(f"Clarification Prompted: {clarification}")
+         _save_process_log(process_log)
+         return clarification
+
+     return process_autism_pipeline(query, corrected_query, process_log, intro)
+
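+ # Pipeline stages, in order: web search -> direct LLM generation -> RAG
+ # retrieval -> rerank -> Wisal answer -> hallucination check (with paraphrase
+ # retry) -> translate back to the user's language.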
+ def process_autism_pipeline(query, corrected_query, process_log, intro):
+     # 1) Web search
+     web_search_resp = asyncio.run(search_autism(corrected_query))
+     web_answer = web_search_resp.get("answer", "")
+     process_log.append(f"Web Search: {web_answer}")
+
+     # 2) Direct LLM generation
+     gen_prompt = Prompt_template_LLM_Generation.format(new_query=corrected_query)
+     generated = call_llm(
+         model="Qwen/Qwen3-32B",
+         messages=[{"role": "user", "content": gen_prompt}],
+         reasoning_effort="none",
+     )
+     process_log.append(f"LLM Generated: {generated}")
+
+     # 3) RAG retrieval
+     rag_resp = asyncio.run(rag_autism(corrected_query, top_k=3))
+     rag_contexts = rag_resp.get("answer", [])
+     process_log.append(f"RAG Contexts: {rag_contexts}")
+
+     # 4) Rerank the candidate answers
+     answers_list = f"[1] {generated}\n[2] {web_answer}\n" + "\n".join(
+         f"[{i+3}] {c}" for i, c in enumerate(rag_contexts)
+     )
+     rerank_prompt = Prompt_template_Reranker.format(new_query=corrected_query, answers_list=answers_list)
+     reranked = call_llm(
+         model="Qwen/Qwen3-32B",
+         messages=[{"role": "user", "content": rerank_prompt}],
+         reasoning_effort="none",
+     )
+     process_log.append(f"Reranked: {reranked}")
+
+     # 5) Compose the Wisal answer from the reranked evidence
+     wisal_prompt = Prompt_template_Wisal.format(new_query=corrected_query, document=reranked)
+     wisal = call_llm(
+         model="Qwen/Qwen3-32B",
+         messages=[{"role": "user", "content": wisal_prompt}],
+         reasoning_effort="none",
+     )
+     process_log.append(f"Wisal Answer: {wisal}")
+
+     # 6) Hallucination check; parse "Score: N" defensively (default 3).
+     halluc_prompt = Prompt_template_Halluciations.format(
+         new_query=corrected_query,
+         answer=wisal,
+         document=generated,
+     )
+     halluc = call_llm(
+         model="Qwen/Qwen3-32B",
+         messages=[{"role": "user", "content": halluc_prompt}],
+         reasoning_effort="none",
+     )
+     process_log.append(f"Hallucination Score: {halluc}")
+     score_match = re.search(r"Score:\s*(\d+)", halluc)
+     score = int(score_match.group(1)) if score_match else 3
+
+     # 7) Paraphrase and retry when the answer scores as unsupported
+     if score in (2, 3):
+         paraphrased = call_llm(
+             model="Qwen/Qwen3-32B",
+             messages=[{"role": "user", "content": Prompt_template_paraphrasing.format(document=generated)}],
+             reasoning_effort="none",
+         )
+         wisal = call_llm(
+             model="Qwen/Qwen3-32B",
+             messages=[{"role": "user", "content": Prompt_template_Wisal.format(new_query=corrected_query, document=paraphrased)}],
+             reasoning_effort="none",
+         )
+         process_log.append(f"Paraphrased Wisal: {wisal}")
+
+     # 8) Translate back to the user's language when needed
+     try:
+         detected_lang = detect(query)
+     except Exception:
+         detected_lang = "en"
+
+     is_english_text = bool(re.fullmatch(r"[A-Za-z0-9 .,?;:'\"!()\-]+", query))
+     needs_translation = detected_lang != "en" or not is_english_text
+
+     if needs_translation:
+         result = call_llm(
+             model="Qwen/Qwen3-32B",
+             messages=[{
+                 "role": "user",
+                 "content": Prompt_template_Translate_to_original.format(query=query, document=wisal),
+             }],
+             reasoning_effort="none",
+         )
+         process_log.append(f"Translated Back: {result}")
+     else:
+         result = wisal
+     process_log.append(f"Final Result: {result}")
+
+     # Render right-to-left scripts (Arabic, Persian, Urdu, Hebrew) correctly.
+     rtl_languages = ["ar", "fa", "ur", "he"]
+     text_dir = "rtl" if detected_lang in rtl_languages else "ltr"
+     wrapped_result = f'<div dir="{text_dir}">{result}</div>'
+     _save_process_log(process_log)
+     return intro + wrapped_result
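+
+ # Illustrative only: driving the full pipeline directly from Python.
+ # print(process_query("What are early signs of autism?", session_id="demo"))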
+
+
+ def _save_process_log(log_lines):
+     """Write one timestamped log file per question; logging must never break the pipeline."""
+     import datetime
+     logs_dir = os.path.join(os.path.dirname(__file__), "logs")
+     os.makedirs(logs_dir, exist_ok=True)
+     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
+     log_filename = os.path.join(logs_dir, f"log_{timestamp}.txt")
+     try:
+         with open(log_filename, "w", encoding="utf-8") as f:
+             for line in log_lines:
+                 f.write(str(line) + "\n\n")
+     except Exception:
+         pass
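+
+ # Illustrative only: each call produces a fresh logs/log_<timestamp>.txt.
+ # _save_process_log(["Original Query: ...", "Final Result: ..."])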
+
+
+ # Gradio UI glue for the main pipeline and the document handlers
+ # (RAG_Domain_know_doc, User_Specific_Documents, Old_Document).
+ def main_pipeline_interface(query):
+     return process_query(query, first_turn=True)
+
+ def main_pipeline_with_doc_and_history(query, doc_file, doc_type, history):
+     response = main_pipeline_with_doc(query, doc_file, doc_type)
+     updated_history = history + f"\nUser: {query}\nWisal: {response}\n"
+     return response, updated_history
+
+ def main_pipeline_with_doc(query, doc_file, doc_type):
+     # With no document attached, fall back to the main pipeline.
+     if doc_file is None or doc_type == "None":
+         return process_query(query, first_turn=True)
+
+     safe_filename = os.path.basename(getattr(doc_file, 'name', str(doc_file)))
+     upload_dir = os.path.join(os.path.dirname(__file__), "uploaded_docs")
+     os.makedirs(upload_dir, exist_ok=True)
+     save_path = os.path.join(upload_dir, safe_filename)
+
+     # doc_file may be file-like (has .read()) or path-like (str / NamedString).
+     if hasattr(doc_file, 'read'):
+         file_bytes = doc_file.read()
+     else:
+         with open(str(doc_file), 'rb') as f:
+             file_bytes = f.read()
+
+     # Save the file content.
+     with open(save_path, "wb") as f:
+         f.write(file_bytes)
+
+     # Route to the matching document handler.
+     if doc_type == "Knowledge Document":
+         status = RAG_Domain_know_doc.ingest_file(save_path)
+         answer = RAG_Domain_know_doc.answer_question(query)
+         return f"[Knowledge Document Uploaded]\n{status}\n\n{answer}"
+     elif doc_type == "User-Specific Document":
+         status = User_Specific_Documents.ingest_file(save_path)
+         answer = User_Specific_Documents.answer_question(query)
+         return f"[User-Specific Document Uploaded]\n{status}\n\n{answer}"
+     elif doc_type == "Old Document":
+         status = Old_Document.ingest_file(save_path)
+         answer = Old_Document.answer_question(query)
+         return f"[Old Document Uploaded]\n{status}\n\n{answer}"
+     elif doc_type == "New Document":
+         status = User_Specific_Documents.ingest_file(save_path)
+         answer = User_Specific_Documents.answer_question(query)
+         return f"[New Document Uploaded]\n{status}\n\n{answer}"
+     else:
+         return "Invalid document type."
+
+ def pipeline_with_history(message, doc_file, doc_type, history):
+     if not message.strip():
+         return history, ""
+     response = main_pipeline_with_doc(message, doc_file, doc_type)
+     history = history + [[message, response]]
+     return history, ""
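+
+ # Illustrative only: history is a list of [user, bot] pairs, as gr.Chatbot expects.
+ # history, _ = pipeline_with_history("What is ABA therapy?", None, "None", [])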
+
+ # Transcribe an audio file with Gemini, yielding progress messages as it goes.
+ def transcribe_audio(audio_filepath):
+     # Assumes the key is supplied via the environment rather than hardcoded.
+     api_key = os.getenv("GEMINI_API_KEY")
+     if audio_filepath is None:
+         yield "No audio provided. Please record or upload an audio file first."
+         return
+     if not api_key:
+         yield "API Key is missing. Please provide your Google AI API key."
+         return
+     try:
+         genai.configure(api_key=api_key)
+         model = genai.GenerativeModel(model_name="models/gemini-2.0-flash")
+
+         print(f"Transcribing audio file: {audio_filepath}")
+         yield "Uploading audio file..."
+
+         # Upload the audio file and poll until processing finishes.
+         audio_file = genai.upload_file(path=audio_filepath)
+         while audio_file.state.name == "PROCESSING":
+             time.sleep(2)  # Wait before checking again
+             audio_file = genai.get_file(audio_file.name)
+
+         if audio_file.state.name == "FAILED":
+             yield "[ERROR] Audio file processing failed."
+             return
+
+         yield "Audio uploaded. Transcribing..."
+
+         # Request the transcription from the model.
+         response = model.generate_content(
+             ["Please transcribe this audio recording.", audio_file],
+             request_options={"timeout": 120},
+         )
+
+         query = response.text if response and response.text else "Transcription failed. The response was empty."
+         yield query
+     except Exception as e:
+         print(f"An error occurred during transcription: {e}")
+         yield f"[ERROR] An unexpected error occurred: {e}"
+
+ def unified_handler(user_text, audio_file, chat_history):
+     chat_history = chat_history or []
+     msg_from_user = None
+
+     if user_text and user_text.strip():
+         msg_from_user = user_text
+     elif audio_file:
+         transcription = None
+         last_out = None
+         gen = transcribe_audio(audio_file)
+         try:
+             while True:
+                 out = next(gen)
+                 # Keep the latest non-error output (progress, then the transcript).
+                 if not out.startswith("[ERROR]"):
+                     last_out = out
+         except StopIteration as e:
+             # If the generator returned a value, it is in e.value.
+             transcription = e.value if e.value else last_out
+         if transcription:
+             msg_from_user = transcription
+
+     if msg_from_user:
+         wisal_reply = process_query(msg_from_user)
+         chat_history.append((msg_from_user, wisal_reply))
+         return chat_history, "", None
+
+     return chat_history, "", None
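+
+ # Note: unified_handler mirrors wisal_handler below but is not wired to any
+ # UI event in this file.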
+
+ def wisal_handler(user_text, audio_file, chat_history):
+     chat_history = chat_history or []
+
+     # A typed message takes priority.
+     if user_text and user_text.strip():
+         response = process_query(user_text)
+         chat_history.append((user_text, response))
+         return chat_history, "", None  # Clear the input box
+
+     # Otherwise fall back to audio, if provided.
+     if audio_file:
+         transcription = None
+         for out in transcribe_audio(audio_file):
+             if isinstance(out, str) and out.startswith("Uploading"):
+                 continue
+             if isinstance(out, str) and out.startswith("[ERROR]"):
+                 chat_history.append((None, out))
+                 return chat_history, "", None
+             if isinstance(out, str):
+                 transcription = out
+         if transcription:
+             wisal_reply = process_query(transcription)
+             chat_history.append((transcription, wisal_reply))  # Show the transcription
+             return chat_history, "", None
+
+     return chat_history, "", None  # Nothing sent
+
+
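+ # A single Send callback, so one click fires exactly one handler. This is a
+ # minimal consolidation sketch (an assumption, not in the original commit):
+ # queries with a document go to the document pipeline, everything else to the
+ # chat/audio handler.
+ def send_handler(user_text, audio_file, doc_file, doc_type, chat_history):
+     if doc_file is not None and doc_type != "None" and user_text and user_text.strip():
+         chat_history = chat_history or []
+         response = main_pipeline_with_doc(user_text, doc_file, doc_type)
+         chat_history.append((user_text, response))
+         return chat_history, "", None
+     return wisal_handler(user_text, audio_file, chat_history)
+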
+ # Logo shipped alongside app.py; resolve it relative to this file so the path
+ # works on the Space as well as locally.
+ image_path = os.path.join(os.path.dirname(__file__), "Compumacy-Logo-Trans2.png")
+
+ with gr.Blocks(title="Wisal Chatbot", theme='Yntec/HaleyCH_Theme_craiyon_alt') as demo:
+     chat_history = gr.State([])
+
+     # Logo (local file)
+     with gr.Row():
+         gr.Image(value=image_path, show_label=False, container=False, height=100)
+
+     gr.Markdown("# 🤖 Wisal: Autism AI Assistant")
+
+     gr.CheckboxGroup(["Doctor", "Patient"], label="User role")
+     chatbot = gr.Chatbot(label="Wisal Chat", height=500)
+     with gr.Row():
+         user_input = gr.Textbox(placeholder="Type your question here...", label="", lines=1)
+         audio_input = gr.Audio(
+             sources=["microphone", "upload"],
+             type="filepath",
+             label="Record or Upload Audio"
+         )
+     send_btn = gr.Button("Send", variant="primary")
+
+     # Send is wired once, below, after the document components are defined.
+
+     with gr.Row():
+         audio_output = gr.Audio(label="TTS Audio Output", interactive=True)
+
+     # Live voice chat via WebRTC, streamed through the Gemini handler.
+     with gr.Row() as row2:
+         with gr.Column():
+             webrtc2 = WebRTC(
+                 label="Live Chat",
+                 modality="audio",
+                 mode="send-receive",
+                 elem_id="audio-source",
+                 rtc_configuration=get_cloudflare_turn_credentials_async,
+                 icon="https://www.gstatic.com/lamda/images/gemini_favicon_f069958c85030456e93de685481c559f160ea06b.png",
+                 pulse_color="rgb(255, 255, 255)",
+                 icon_button_color="rgb(255, 255, 255)",
+             )
+             webrtc2.stream(
+                 GeminiHandler(),
+                 inputs=[webrtc2],
+                 outputs=[webrtc2],
+                 time_limit=180 if get_space() else None,
+                 concurrency_limit=2 if get_space() else None,
+             )
+
+     doc_file = gr.File(label="📎 Upload Document (PDF, DOCX, TXT)", file_types=[".pdf", ".docx", ".txt"])
+
+     doc_type = gr.Radio(
+         ["None", "Knowledge Document", "User-Specific Document"],
+         value="None",
+         label="Document Type"
+     )
+
+     # Shown only for user-specific documents; distinguishes new uploads from
+     # previously ingested ones.
+     user_doc_option = gr.Radio(
+         ["New Document", "Old Document"],
+         label="Select User Document Type",
+         visible=False
+     )
+
+     def toggle_user_doc_visibility(selected_type):
+         return gr.update(visible=(selected_type == "User-Specific Document"))
+
+     doc_type.change(
+         toggle_user_doc_visibility,
+         inputs=doc_type,
+         outputs=user_doc_option
+     )
+
+     # Single Send wiring: route through send_handler (defined above) so a
+     # click with a document goes to the document pipeline and anything else
+     # to the chat/audio handler.
+     send_btn.click(
+         fn=send_handler,
+         inputs=[user_input, audio_input, doc_file, doc_type, chatbot],
+         outputs=[chatbot, user_input, audio_input],
+         api_name="wisal_handler",
+     )
+
+     clear_btn = gr.Button("Clear Chat", elem_id="clear-button")
+     clear_btn.click(lambda: [], outputs=[chatbot])
+
+ # Custom theme CSS for the app
+ theme_css = """
+ /* Logo Row */
+ #logo-row {
+     display: flex;
+     justify-content: center;
+     align-items: center;
+     padding: 1rem;
+     background-color: #222222; /* Dark gray background for the logo row */
+ }
+
+ #logo-row img {
+     max-width: 300px;
+     object-fit: contain;
+ }
+
+ /* Send Button */
+ #send-button {
+     background-color: #f44336; /* Red color for the Send button */
+     color: white;
+     font-size: 16px;
+     padding: 10px 24px;
+     border: none;
+     border-radius: 5px;
+     cursor: pointer;
+ }
+
+ #send-button:hover {
+     background-color: #e53935;
+ }
+
+ /* Clear Button */
+ #clear-button {
+     background-color: #f44336; /* Red color for the Clear button */
+     color: white;
+     font-size: 16px;
+     padding: 10px 24px;
+     border: none;
+     border-radius: 5px;
+     cursor: pointer;
+ }
+
+ #clear-button:hover {
+     background-color: #e53935; /* Darker red on hover */
+ }
+
+ /* Main Container Background */
+ .gradio-container {
+     background-color: #2C2C2C; /* Dark background color */
+     padding: 20px;
+     color: white;
+ }
+
+ /* Saved State Item */
+ .saved-state-item {
+     padding: 10px;
+     margin: 5px 0;
+     border-radius: 5px;
+     background-color: #333333; /* Dark gray background for saved state items */
+     color: #ffffff; /* White text color */
+     cursor: pointer;
+     transition: background-color 0.2s;
+     border: 1px solid #444444;
+ }
+
+ .saved-state-item:hover {
+     background-color: #444444; /* Slightly lighter gray on hover */
+ }
+
+ /* Delete Button */
+ .delete-button {
+     color: #ff6b6b; /* Red color for the delete button */
+     margin-left: 10px;
+     float: right;
+     font-weight: bold;
+ }
+
+ /* Filesystem Sessions Container */
+ .filesystem-sessions-container {
+     max-height: 400px;
+     overflow-y: auto;
+     padding: 5px;
+     border: 1px solid #444;
+     border-radius: 5px;
+     background-color: #222222; /* Dark background for the session container */
+ }
+
+ /* Highlight effect when clicking */
+ .saved-state-item:active {
+     background-color: #555555; /* Darker gray when clicking */
+ }
+ """
+
+ demo.css = theme_css
+
+ if __name__ == "__main__":
+     demo.launch(debug=True)