#!/usr/bin/env python
# coding: utf-8
# In[1]:
# pip install langdetect
# pip install sentencepiece
# pip install boto3
# pip install awscli
# pip install sacremoses
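# The imports below also need these packages if they are not already installed
# (package names assumed from the imports; python-dotenv provides the dotenv module):
# pip install gradio
# pip install transformers
# pip install python-dotenv
# pip install torch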
# In[2]:
import gradio as gr
from transformers import pipeline, AutoTokenizer, TFAutoModelForSeq2SeqLM, MarianMTModel, MarianTokenizer
from dotenv import load_dotenv
import os
import subprocess
import torch
import tempfile
from langdetect import detect
import re
import boto3
# In[3]:
# Import helper functions and shared language settings from the functions file
from functions_mm import (
    handle_query, transcribe_audio_original, submit_question, polly_text_to_speech,
    translate, translate_and_speech, clear_inputs, voice_map, language_map,
    default_language, languages
)
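# Judging by their names and how they are wired up below, these helpers cover audio
# transcription, querying the LLM, Amazon Polly text-to-speech, and translation,
# plus the shared language/voice lookup tables (assumption: not verified here).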
# In[4]:
# Load environment variables.
load_dotenv()
# Set the model name for our LLMs.
OPENAI_MODEL = "gpt-3.5-turbo"
# Store the API key in a variable.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
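# load_dotenv() reads the key from a local .env file. Example entry
# (placeholder value shown; use your own key):
# OPENAI_API_KEY=sk-...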
# In[5]:
instructions = """
# Diabetes Chatbot
#### Step 1: Record audio OR type your question (not both!) -- Step 2: If you want the response in another language, select it from the dropdown
#### Step 3: Submit your question -- Step 4: Translate the response -- Step 5: Clear all inputs to start fresh
"""
with gr.Blocks() as app2:
    with gr.Row():
        gr.Markdown(instructions)
    with gr.Row():
        input_audio = gr.Audio(
            label="Ask a question about Diabetes, then click 'Transcribe audio'",
            type="filepath")
        language_dropdown = gr.Dropdown(
            label="Click the middle of the dropdown bar to select a translation language",
            choices=list(language_map.keys()), value=default_language, type='value')
    with gr.Row():
        transcribe_button = gr.Button("Transcribe audio")
        submit_button = gr.Button("Submit your question")
        translate_button = gr.Button("Translate the response")
        clear_button = gr.Button("Clear All")
    # Divide the screen into three columns
    with gr.Row():
        # This column will be on the left side of the screen
        with gr.Column():
            query_text = gr.Textbox(label="Type your question here. If both recorded audio and question text are present, the app submits the question text. Click the transcribe button to populate this box from your audio.")
            # output_original_speech = gr.Audio(label="Text to speech here")
        with gr.Column():
            response_text = gr.Textbox(label="Chatbot response")
            response_speech = gr.Audio(label="Chatbot response speech")
        # This column will be on the right side of the screen
        with gr.Column():
            output_translated = gr.Textbox(label="Translated text")
            output_translated_speech = gr.Audio(label="Translated speech")

    # Audio transcription
    transcribe_button.click(
        fn=transcribe_audio_original,
        inputs=[input_audio],
        outputs=[query_text]
    )
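
    # Question submission: produces the chatbot's text response and its spoken version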
    submit_button.click(
        fn=submit_question,
        inputs=[input_audio, query_text, language_dropdown],
        outputs=[response_text, response_speech]
    )

    # Translation
    translate_button.click(
        fn=translate_and_speech,
        inputs=[response_text, language_dropdown],
        outputs=[output_translated, output_translated_speech]
    )

    # Clearing all inputs and outputs
    clear_button.click(
        fn=clear_inputs,
        inputs=[],
        outputs=[input_audio, query_text, response_text, response_speech,
                 output_translated, output_translated_speech]
    )
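
# share=True asks Gradio to create a temporary public share link in addition to the local URL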
app2.launch(show_error=True, share=True)