File size: 20,279 Bytes
410fd66
 
 
 
a6dea81
410fd66
573cc21
36b230a
573cc21
fcdc0cf
36b230a
578b499
40f86f1
cd51bec
 
e763e30
 
 
 
36b230a
 
578b499
 
 
 
 
36b230a
fcdc0cf
e763e30
 
 
 
cd51bec
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e763e30
 
 
 
903aaa1
 
 
 
 
 
 
 
e763e30
 
fcdc0cf
 
 
 
 
e763e30
cd51bec
fcdc0cf
 
e763e30
fcdc0cf
 
36b230a
fcdc0cf
 
 
 
 
 
 
 
e763e30
 
 
fcdc0cf
578b499
fcdc0cf
 
36b230a
bc64286
 
 
e763e30
 
 
bc64286
e763e30
bc64286
 
36b230a
bc64286
fcdc0cf
 
 
bc64286
68390a5
573cc21
fcdc0cf
573cc21
578b499
45a579f
573cc21
fcdc0cf
573cc21
578b499
bc64286
36b230a
a6dea81
e763e30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a6dea81
e763e30
578b499
 
 
 
 
 
 
 
 
a6dea81
40f86f1
 
 
 
e763e30
40f86f1
 
 
 
 
 
 
 
 
e763e30
578b499
573cc21
578b499
 
a6dea81
e763e30
bde85f0
 
 
573cc21
578b499
 
 
 
 
 
e763e30
578b499
 
 
 
e763e30
fcdc0cf
e763e30
d2ad93f
36b230a
e763e30
573cc21
578b499
 
36b230a
578b499
e763e30
432d77e
578b499
 
fcdc0cf
 
578b499
 
a6dea81
 
578b499
36b230a
a6dea81
 
578b499
573cc21
cd51bec
 
a6dea81
cd51bec
 
578b499
e763e30
fcdc0cf
 
b3715d5
e763e30
 
 
 
 
01f403f
b3715d5
 
e763e30
 
01f403f
 
 
e13a0db
 
 
 
01f403f
 
e13a0db
 
01f403f
e13a0db
 
01f403f
e13a0db
01f403f
e13a0db
 
 
 
 
b7ce4b0
 
 
 
 
 
 
 
 
 
 
e763e30
e13a0db
 
 
 
01f403f
 
 
 
 
 
 
 
 
 
e763e30
1a20fbb
e763e30
 
1a20fbb
 
a6dea81
578b499
e763e30
410fd66
e763e30
 
cd51bec
 
 
 
e763e30
 
 
 
 
 
 
 
 
 
 
 
cd51bec
 
 
e763e30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
903aaa1
 
 
e763e30
 
 
 
 
 
 
 
 
 
 
410fd66
 
e763e30
bde85f0
 
 
578b499
40f86f1
 
9bb4c44
 
cd51bec
 
9bb4c44
40f86f1
bde85f0
 
 
 
578b499
40f86f1
9b183d6
578b499
40f86f1
9bb4c44
40f86f1
9bb4c44
40f86f1
9bb4c44
 
432d77e
a6dea81
578b499
410fd66
b694114
 
410fd66
e763e30
36b230a
578b499
a6dea81
410fd66
b694114
578b499
e763e30
 
 
bc64286
a6dea81
36b230a
578b499
a6dea81
410fd66
578b499
e763e30
578b499
e13a0db
578b499
b7ce4b0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
import gradio as gr
import librosa
import numpy as np
import os
import hashlib
from datetime import datetime
from transformers import pipeline
import soundfile
import torch
from tenacity import retry, stop_after_attempt, wait_fixed
import logging
import tempfile
import shutil
from simple_salesforce import Salesforce
from dotenv import load_dotenv
import pyttsx3
from cryptography.fernet import Fernet
import asyncio
import base64

# Set up logging
# DEBUG level with two handlers: a persistent file ("voice_analyzer.log")
# and the console, sharing one timestamped format.
logging.basicConfig(
    level=logging.DEBUG,
    format="%(asctime)s - %(levelname)s - %(message)s",
    handlers=[logging.FileHandler("voice_analyzer.log"), logging.StreamHandler()]
)
logger = logging.getLogger(__name__)

# Load environment variables
# Reads a local .env file (if present) so the SF_* credentials and
# ENCRYPTION_KEY below can be supplied without exporting them.
load_dotenv()

# Salesforce configuration
# Integration is optional: enabled only when all three credentials are present
# in the environment, and disabled again if the initial login fails.
SF_USERNAME = os.getenv("SF_USERNAME")
SF_PASSWORD = os.getenv("SF_PASSWORD")
SF_SECURITY_TOKEN = os.getenv("SF_SECURITY_TOKEN")
SF_ENABLED = all([SF_USERNAME, SF_PASSWORD, SF_SECURITY_TOKEN])
sf = None
if SF_ENABLED:
    try:
        sf = Salesforce(
            username=SF_USERNAME,
            password=SF_PASSWORD,
            security_token=SF_SECURITY_TOKEN
        )
        logger.info("Salesforce connection established")
    except Exception as e:
        logger.error(f"Salesforce connection failed: {str(e)}")
        SF_ENABLED = False

# Encryption setup (Fernet: AES-128-CBC + HMAC-SHA256 — not AES-256 as
# previously labeled)
# NOTE(review): when ENCRYPTION_KEY is absent, a fresh key is generated on
# every process start, so ciphertext stored by earlier runs becomes
# undecryptable — confirm a stable key is provisioned in production.
ENCRYPTION_KEY = os.getenv("ENCRYPTION_KEY") or Fernet.generate_key()
fernet = Fernet(ENCRYPTION_KEY)

# Initialize text-to-speech with fallback
# If pyttsx3 cannot start (e.g. no audio backend on the host), tts_engine
# stays None and speak_response() silently skips instead of crashing.
tts_engine = None
try:
    tts_engine = pyttsx3.init()
    tts_engine.setProperty("rate", 150)  # slow the speech rate slightly
    logger.info("pyttsx3 initialized successfully")
except Exception as e:
    logger.warning(f"Failed to initialize pyttsx3: {str(e)}. Text-to-speech disabled.")

# Initialize local models
@retry(stop=stop_after_attempt(3), wait=wait_fixed(2))
def load_whisper_model():
    """Build the Whisper-large-v3 ASR pipeline on CPU, retrying up to 3 times.

    Raises the underlying exception after logging, so tenacity can retry.
    """
    try:
        asr = pipeline(
            "automatic-speech-recognition",
            model="openai/whisper-large-v3",
            device=-1,  # CPU; use device=0 for GPU
            model_kwargs={"use_safetensors": True}
        )
    except Exception as exc:
        logger.error(f"Failed to load Whisper model: {str(exc)}")
        raise
    logger.info("Whisper-large-v3 model loaded successfully")
    return asr

@retry(stop=stop_after_attempt(3), wait=wait_fixed(2))
def load_symptom_model():
    """Build the symptom-to-disease classifier, retrying up to 3 times.

    Falls back to a generic distilbert-base-uncased checkpoint when the
    primary model cannot be loaded; re-raises if both attempts fail.
    """
    try:
        primary = pipeline(
            "text-classification",
            model="abhirajeshbhai/symptom-2-disease-net",
            device=-1,
            model_kwargs={"use_safetensors": True},
            return_all_scores=False
        )
        logger.info("Symptom-2-Disease model loaded successfully")
        return primary
    except Exception as e:
        logger.error(f"Failed to load Symptom-2-Disease model: {str(e)}")
        # Primary checkpoint unavailable — try the generic fallback.
        try:
            fallback = pipeline(
                "text-classification",
                model="distilbert-base-uncased",
                device=-1,
                return_all_scores=False
            )
            logger.warning("Fallback to distilbert-base-uncased model")
            return fallback
        except Exception as fallback_e:
            logger.error(f"Fallback model failed: {str(fallback_e)}")
            raise

# Load both models once at import time; keep the app alive if either fails.
whisper = None
symptom_classifier = None
is_fallback_model = False

try:
    whisper = load_whisper_model()
except Exception as e:
    logger.error(f"Whisper model initialization failed: {str(e)}")

try:
    symptom_classifier = load_symptom_model()
    # Bug fix: load_symptom_model() may silently return the DistilBERT
    # fallback, but is_fallback_model was previously only set when loading
    # failed outright (and the classifier was None anyway). Detect the
    # fallback from the loaded checkpoint name instead.
    _loaded_name = getattr(getattr(symptom_classifier, "model", None), "name_or_path", "")
    is_fallback_model = "distilbert" in str(_loaded_name).lower()
except Exception as e:
    logger.error(f"Symptom model initialization failed: {str(e)}")
    symptom_classifier = None
    is_fallback_model = True

def encrypt_data(data):
    """Encrypt str/bytes with the module-level Fernet key.

    Returns the token as a str, or None when encryption fails.
    """
    try:
        payload = data.encode() if isinstance(data, str) else data
        token = fernet.encrypt(payload)
        return token.decode()
    except Exception as e:
        logger.error(f"Encryption failed: {str(e)}")
        return None

def decrypt_data(data):
    """Decrypt a Fernet token (str) back to plaintext str; None on failure."""
    try:
        plaintext = fernet.decrypt(data.encode())
        return plaintext.decode()
    except Exception as e:
        logger.error(f"Decryption failed: {str(e)}")
        return None

def compute_file_hash(file_path):
    """Return the MD5 hex digest of the file at *file_path*.

    Streams the file in 4 KiB chunks so large audio files are never held in
    memory at once; returns the string "unknown" on any I/O error.
    (MD5 is used only as an integrity fingerprint, not for security.)
    """
    digest = hashlib.md5()
    try:
        with open(file_path, "rb") as handle:
            while chunk := handle.read(4096):
                digest.update(chunk)
    except Exception as e:
        logger.error(f"Failed to compute file hash: {str(e)}")
        return "unknown"
    return digest.hexdigest()

def ensure_writable_dir(directory):
    """Create *directory* if needed and probe it with a throwaway file.

    Returns True when the directory exists and accepts writes, False otherwise.
    """
    probe_path = os.path.join(directory, "test")
    try:
        os.makedirs(directory, exist_ok=True)
        with open(probe_path, "w") as probe:
            probe.write("test")
        os.remove(probe_path)
    except Exception as e:
        logger.error(f"Directory {directory} not writable: {str(e)}")
        return False
    logger.debug(f"Directory {directory} is writable")
    return True

async def transcribe_audio(audio_file, language="en"):
    """Transcribe an audio file with the local Whisper pipeline.

    Args:
        audio_file: path to an audio file readable by librosa.
        language: language hint forwarded to Whisper generation.

    Returns:
        The transcription text, or a string starting with "Error:" on failure
        (callers test for the "Error" substring rather than catching).
    """
    if not whisper:
        logger.error("Whisper model not loaded")
        return "Error: Whisper model not loaded"
    try:
        logger.debug(f"Transcribing audio: {audio_file} (language: {language})")
        if not isinstance(audio_file, (str, bytes, os.PathLike)) or not os.path.exists(audio_file):
            logger.error(f"Invalid or missing audio file: {audio_file}")
            return "Error: Invalid or missing audio file"
        audio, sr = librosa.load(audio_file, sr=16000)
        if len(audio) < 1600:  # < 0.1 s at 16 kHz
            logger.error("Audio too short")
            return "Error: Audio too short (<0.1s)"
        if np.max(np.abs(audio)) < 1e-4:
            logger.error("Audio too quiet")
            return "Error: Audio too quiet"

        # Create the temp path first (closing the handle so Windows can
        # reopen it), then write/transcribe/delete inside try/finally.
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_wav:
            temp_path = temp_wav.name
        try:
            # Bug fix: soundfile.write takes (file, data, samplerate); the
            # previous (data, samplerate, file) argument order failed at runtime.
            soundfile.write(temp_path, audio, sr)
            logger.debug(f"Saved temp WAV: {temp_path}")

            with torch.no_grad():
                # Bug fix: the ASR pipeline has no top-level `language`
                # kwarg; generation options belong in generate_kwargs.
                result = whisper(temp_path, generate_kwargs={"language": language, "num_beams": 5})
            transcription = result.get("text", "").strip()
            logger.info(f"Transcription: {transcription}")
        finally:
            # Bug fix: previously the temp WAV leaked when inference raised.
            try:
                os.remove(temp_path)
                logger.debug(f"Deleted temp WAV: {temp_path}")
            except Exception as e:
                logger.error(f"Failed to delete temp WAV: {str(e)}")

        if not transcription:
            logger.error("Transcription empty")
            return "Error: Transcription empty"
        words = transcription.split()
        # Heuristic: heavy word repetition usually means Whisper hallucinated.
        if len(words) > 5 and len(set(words)) < len(words) / 2:
            logger.error("Transcription repetitive")
            return "Error: Transcription repetitive"
        return transcription
    except Exception as e:
        logger.error(f"Transcription failed: {str(e)}")
        return f"Error: {str(e)}"

def analyze_symptoms(text):
    """Classify symptom text into a probable condition.

    Args:
        text: transcription to classify; any string containing "Error" is
              treated as invalid (upstream error sentinel).

    Returns:
        (prediction, score): the best label and its confidence in [0, 1].
        Falls back to ("No health condition detected", 0.0) when the model
        output cannot be interpreted, or an "Error: ..." label with 0.0 on
        hard failures. The label gains a " (distilbert)" suffix when the
        fallback model is in use.
    """
    if not symptom_classifier:
        logger.error("Symptom model not loaded")
        return "Error: Symptom model not loaded", 0.0
    try:
        if not text or not isinstance(text, str) or "Error" in text:
            logger.error(f"Invalid text input: {text}")
            return "Error: No valid transcription", 0.0

        with torch.no_grad():
            result = symptom_classifier(text)
        logger.debug(f"Raw model output: {result}")

        # Defaults used whenever the output cannot be interpreted.
        prediction = "No health condition detected"
        score = 0.0

        if result is None:
            logger.warning("Model output is None")
        elif isinstance(result, (str, int, float, bool)):
            logger.warning(f"Invalid model output type: {type(result)}, value: {result}")
        elif isinstance(result, dict):
            logger.debug("Model returned single dictionary")
            if "label" in result and "score" in result:
                prediction = result["label"]
                score = result["score"]
            else:
                logger.warning(f"Missing label or score in dictionary: {result}")
        elif isinstance(result, (tuple, list)):
            candidates = _flatten_model_output(result)
            # Single validation pass (the previous code checked label/score
            # presence twice: once with all(...) and again in the filter).
            valid_items = [
                item for item in candidates
                if isinstance(item.get("label"), str)
                and isinstance(item.get("score"), (int, float))
                and 0 <= item["score"] <= 1
            ]
            if not valid_items:
                logger.warning(f"No valid label/score entries in model output: {candidates}")
            else:
                best = max(valid_items, key=lambda item: item["score"])
                prediction = best["label"]
                score = best["score"]

        # Defensive re-check before formatting/returning.
        if not isinstance(prediction, str):
            logger.warning(f"Invalid label type: {type(prediction)}, value: {prediction}")
            prediction = "No health condition detected"
        if not isinstance(score, (int, float)) or score < 0 or score > 1:
            logger.warning(f"Invalid score: {score}")
            score = 0.0

        if is_fallback_model:
            logger.warning("Using fallback DistilBERT model")
            prediction = f"{prediction} (distilbert)"
        logger.info(f"Prediction: {prediction}, Score: {score:.4f}")
        return prediction, score
    except Exception as e:
        logger.error(f"Symptom analysis failed: {str(e)}")
        return "Error: Symptom analysis failed", 0.0

def _flatten_model_output(item, depth=0, max_depth=10):
    """Recursively flatten nested lists/tuples of classifier output into dicts.

    Non-dict leaves are logged and dropped; recursion is capped at max_depth.
    """
    if depth > max_depth:
        logger.warning(f"Max recursion depth exceeded: {item}")
        return []
    if isinstance(item, dict):
        return [item]
    if isinstance(item, (tuple, list)):
        flattened = []
        for subitem in item:
            flattened.extend(_flatten_model_output(subitem, depth + 1, max_depth))
        return flattened
    logger.warning(f"Skipping non-dict item: {item}")
    return []

def save_to_salesforce(user_id, transcription, prediction, score, feedback, consent_granted):
    """Persist one analysis to the Health_Analysis__c object (best-effort).

    Does nothing when Salesforce is disabled/unconnected or when the user has
    not granted consent. Never raises; failures are only logged.
    """
    if not SF_ENABLED or not sf:
        logger.debug("Salesforce integration disabled or not connected")
        return
    if not consent_granted:
        return
    try:
        # Bug fix: encrypt_data() returns None on failure, and None[:255]
        # raised TypeError (silently swallowed below, losing the record).
        # Fall back to empty strings so the record is still written.
        encrypted_transcription = encrypt_data(transcription) or ""
        encrypted_feedback = encrypt_data(feedback) or ""
        # NOTE(review): truncating a Fernet token to 255 chars makes it
        # undecryptable later — confirm the field sizes / use long text areas.
        sf.Health_Analysis__c.create({
            "User_ID__c": user_id,
            "Transcription__c": encrypted_transcription[:255],
            "Prediction__c": prediction[:255],
            "Confidence_Score__c": float(score),
            "Feedback__c": encrypted_feedback[:255],
            "Analysis_Date__c": datetime.utcnow().strftime("%Y-%m-%d")
        })
        logger.info("Saved analysis to Salesforce")
    except Exception as e:
        logger.error(f"Failed to save to Salesforce: {str(e)}")

def generate_report():
    """Aggregate stored analyses by predicted condition into a text report.

    Returns the report string, or an "Error: ..." string when Salesforce is
    unavailable or the query fails.
    """
    if not SF_ENABLED or not sf:
        return "Error: Salesforce not connected"
    try:
        soql = "SELECT COUNT(Id), Prediction__c FROM Health_Analysis__c GROUP BY Prediction__c"
        rows = sf.query(soql)["records"]
        # COUNT(Id) comes back under the implicit alias "expr0".
        lines = ["Health Analysis Report"]
        for row in rows:
            lines.append(f"Condition: {row['Prediction__c']}, Count: {row['expr0']}")
        logger.info("Generated usage report")
        return "\n".join(lines) + "\n"
    except Exception as e:
        logger.error(f"Failed to generate report: {str(e)}")
        return f"Error: {str(e)}"

async def speak_response(text):
    """Speak *text* via pyttsx3 without blocking the event loop.

    No-op when the TTS engine failed to initialize; errors are only logged.
    """
    if not tts_engine:
        logger.warning("Text-to-speech unavailable; skipping")
        return
    try:
        def sync_speak():
            tts_engine.say(text)
            tts_engine.runAndWait()
        # Bug fix: asyncio.get_event_loop() is deprecated inside a running
        # coroutine; get_running_loop() is the supported call here.
        loop = asyncio.get_running_loop()
        # pyttsx3 is blocking, so run it on the default executor thread.
        await loop.run_in_executor(None, sync_speak)
        logger.debug("Spoke response")
    except Exception as e:
        logger.error(f"Text-to-speech failed: {str(e)}")

async def analyze_voice(audio_file, language="en", user_id="anonymous", consent_granted=True):
    """Full pipeline: copy upload -> transcribe -> classify -> persist -> speak.

    Args:
        audio_file: path to the recorded/uploaded audio.
        language: transcription language hint.
        user_id: identifier stored alongside the analysis.
        consent_granted: whether the user allowed persisting the data.

    Returns:
        Feedback text, or a string starting with "Error:" on failure.
    """
    copied_path = None
    try:
        logger.debug(f"Starting analysis for audio_file: {audio_file}, language: {language}")
        if audio_file is None or not isinstance(audio_file, (str, bytes, os.PathLike)):
            logger.error(f"Invalid audio file input: {audio_file}")
            return "Error: No audio file provided"

        temp_dir = os.path.join(tempfile.gettempdir(), "gradio")
        if not ensure_writable_dir(temp_dir):
            fallback_dir = os.path.join(os.getcwd(), "temp_gradio")
            if not ensure_writable_dir(fallback_dir):
                logger.error(f"Temp directories {temp_dir} and {fallback_dir} not writable")
                return "Error: Temp directories not writable"
            temp_dir = fallback_dir

        if not os.path.exists(audio_file):
            logger.error(f"Audio file not found: {audio_file}")
            return "Error: Audio file not found"

        # Work on a private timestamped copy so Gradio can reclaim its temp file.
        copied_path = os.path.join(
            temp_dir,
            f"audio_{datetime.utcnow().strftime('%Y%m%d%H%M%S%f')}_{os.path.basename(audio_file or 'unknown.wav')}"
        )
        try:
            shutil.copy(audio_file, copied_path)
            logger.debug(f"Copied to: {copied_path}")
        except Exception as e:
            copied_path = None  # nothing to clean up
            logger.error(f"Failed to copy audio file: {str(e)}")
            return f"Error: Failed to copy audio file: {str(e)}"

        file_hash = compute_file_hash(copied_path)
        logger.info(f"Processing audio, Hash: {file_hash}")

        # Quick sanity load for logging; transcribe_audio reloads internally.
        audio, sr = librosa.load(copied_path, sr=16000)
        logger.info(f"Audio loaded: shape={audio.shape}, SR={sr}, Duration={len(audio)/sr:.2f}s")

        transcription = await transcribe_audio(copied_path, language)
        if "Error" in transcription:
            logger.error(f"Transcription error: {transcription}")
            return transcription

        # Refuse medication/treatment queries outright.
        if any(keyword in transcription.lower() for keyword in ["medicine", "treatment"]):
            logger.warning("Medication query detected")
            feedback = "Error: This tool does not provide medication advice"
            await speak_response(feedback)
            return feedback

        prediction, score = analyze_symptoms(transcription)
        if "Error" in prediction:
            logger.error(f"Symptom analysis error: {prediction}")
            return prediction

        feedback = (
            "No health condition detected, consult a doctor if symptoms persist. This is not a medical diagnosis."
            if prediction == "No health condition detected"
            else f"Possible {prediction.lower()} detected based on symptoms like '{transcription.lower()}', consult a doctor. This is not a medical diagnosis."
        )
        logger.info(f"Feedback: {feedback}, Transcription: {transcription}, Prediction: {prediction}, Score: {score:.4f}")

        # Save to Salesforce (no-op without consent/connection)
        save_to_salesforce(user_id, transcription, prediction, score, feedback, consent_granted)

        # Speak response
        await speak_response(feedback)

        return feedback
    except Exception as e:
        logger.error(f"Voice analysis failed: {str(e)}")
        return f"Error: {str(e)}"
    finally:
        # Bug fix: the copied temp audio previously leaked on every error
        # return path; delete it on all paths.
        if copied_path and os.path.exists(copied_path):
            try:
                os.remove(copied_path)
                logger.debug(f"Deleted audio file: {copied_path}")
            except Exception as e:
                logger.error(f"Failed to delete audio file: {str(e)}")

async def test_with_sample_audio(language="en", user_id="anonymous", consent_granted=True):
    """Smoke-test the pipeline with synthetic audio and a mock transcription.

    The generated WAV only exercises audio writing; classification runs on a
    fixed mock sentence so the test does not depend on Whisper.
    """
    temp_dir = os.path.join(tempfile.gettempdir(), "audio_samples")
    if not ensure_writable_dir(temp_dir):
        fallback_dir = os.path.join(os.getcwd(), "temp_audio_samples")
        if not ensure_writable_dir(fallback_dir):
            logger.error(f"Temp directories {temp_dir} and {fallback_dir} not writable")
            return f"Error: Temp directories not writable"
        temp_dir = fallback_dir
    
    sample_audio_path = os.path.join(temp_dir, "dummy_test.wav")
    logger.info(f"Generating synthetic audio at: {sample_audio_path}")
    # 2 s of a lightly frequency/amplitude-modulated 440 Hz tone plus noise.
    sr = 16000
    t = np.linspace(0, 2, 2 * sr)
    freq_mod = 440 + 10 * np.sin(2 * np.pi * 0.5 * t)
    amplitude_mod = 0.5 + 0.1 * np.sin(2 * np.pi * 0.3 * t)
    noise = 0.01 * np.random.normal(0, 1, len(t))
    dummy_audio = amplitude_mod * np.sin(2 * np.pi * freq_mod * t) + noise
    try:
        # Bug fix: soundfile.write takes (file, data, samplerate); the
        # previous (data, samplerate, file) argument order failed at runtime.
        soundfile.write(sample_audio_path, dummy_audio, sr)
        logger.info(f"Generated synthetic audio: {sample_audio_path}")
    except Exception as e:
        logger.error(f"Failed to write synthetic audio: {str(e)}")
        return f"Error: Failed to generate synthetic audio: {str(e)}"
    
    if not os.path.exists(sample_audio_path):
        logger.error(f"Synthetic audio not created: {sample_audio_path}")
        return f"Error: Synthetic audio not created: {sample_audio_path}"
    
    mock_transcription = "I have a cough and sore throat"
    logger.info(f"Mock transcription: {mock_transcription}")
    prediction, score = analyze_symptoms(mock_transcription)
    feedback = (
        "No health condition detected, consult a doctor if symptoms persist. This is not a medical diagnosis."
        if prediction == "No health condition detected"
        else f"Possible {prediction.lower()} detected based on symptoms like '{mock_transcription.lower()}', consult a doctor. This is not a medical diagnosis."
    )
    logger.info(f"Test feedback: {feedback}, Prediction: {prediction}, Score: {score:.4f}")
    
    # Save to Salesforce (no-op without consent/connection)
    save_to_salesforce(user_id, mock_transcription, prediction, score, feedback, consent_granted)
    
    # Best-effort cleanup of the generated sample.
    try:
        os.remove(sample_audio_path)
        logger.debug(f"Deleted test audio: {sample_audio_path}")
    except Exception:
        pass
    return feedback

async def voicebot_interface(audio_file, language="en", user_id="anonymous", consent_granted=True):
    """Thin Gradio adapter: delegate straight to analyze_voice()."""
    feedback = await analyze_voice(audio_file, language, user_id, consent_granted)
    return feedback

# Gradio interface
# Single-page app: audio + options in, plain-text feedback out.
# type="filepath" means voicebot_interface receives a path string, which is
# what analyze_voice expects.
iface = gr.Interface(
    fn=voicebot_interface,
    inputs=[
        gr.Audio(type="filepath", label="Record or Upload Voice (WAV, MP3, FLAC, 1+ sec)"),
        gr.Dropdown(["en", "es", "hi", "zh"], label="Language", value="en"),
        gr.Textbox(label="User ID (optional)", value="anonymous"),
        gr.Checkbox(label="Consent to store data", value=True)
    ],
    outputs=gr.Textbox(label="Health Assessment Feedback"),
    title="Smart Voicebot for Public Health",
    description="Record or upload a voice sample describing symptoms (e.g., 'I have a cough') for preliminary health assessment. Supports English, Spanish, Hindi, Mandarin. Not a diagnostic tool. Data is encrypted and stored with consent. Complies with HIPAA/GDPR."
)

if __name__ == "__main__":
    logger.info("Starting Voice Health Analyzer")
    # Smoke-test with synthetic audio before serving traffic.
    # Bug fix: asyncio.get_event_loop() outside a running loop is deprecated;
    # asyncio.run() creates and tears down a fresh loop correctly.
    print(asyncio.run(test_with_sample_audio()))
    iface.launch(server_name="0.0.0.0", server_port=7860)