{ "cells": [ { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "# Read in documents using LangChain's loaders\n", "# Take everything in all the sub-folders of our knowledgebase\n", "\n", "import glob\n", "import os\n", "\n", "# imports for langchain, plotly and Chroma\n", "\n", "from langchain.document_loaders import DirectoryLoader, TextLoader\n", "from langchain.text_splitter import CharacterTextSplitter\n", "from langchain.schema import Document\n", "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.chat_models import ChatOpenAI\n", "from langchain_chroma import Chroma\n", "import matplotlib.pyplot as plt\n", "from sklearn.manifold import TSNE\n", "import numpy as np\n", "import plotly.graph_objects as go\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain.chains import ConversationalRetrievalChain\n", "from langchain.embeddings import HuggingFaceEmbeddings" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Total number of chunks: 6800\n", "Document types found: {'academic_calendar', 'admissions', 'research', 'tuition', 'about', 'resources', 'contact', 'policies', 'academics', 'sports', 'scholarships', 'financial_aid', 'events', 'exchange', 'campus', 'student_support', 'news'}\n" ] } ], "source": [ "folders = glob.glob(\"usiu-knowledge-base/*\")\n", "\n", "def add_metadata(doc, doc_type):\n", " doc.metadata[\"doc_type\"] = doc_type\n", " return doc\n", "\n", "# With thanks to CG and Jon R, students on the course, for this fix needed for some users \n", "text_loader_kwargs = {'encoding': 'utf-8'}\n", "# If that doesn't work, some Windows users might need to uncomment the next line instead\n", "# text_loader_kwargs={'autodetect_encoding': True}\n", "\n", "documents = []\n", "for folder in folders:\n", " doc_type = os.path.basename(folder)\n", " loader = DirectoryLoader(folder, glob=\"**/*.md\", loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)\n", " folder_docs = loader.load()\n", " documents.extend([add_metadata(doc, doc_type) for doc in folder_docs])\n", "\n", "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n", "chunks = text_splitter.split_documents(documents)\n", "\n", "print(f\"Total number of chunks: {len(chunks)}\")\n", "print(f\"Document types found: {set(doc.metadata['doc_type'] for doc in documents)}\")" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'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'" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# Load environment variables in a file called .env\n", "\n", "from dotenv import load_dotenv\n", "\n", "\n", "load_dotenv()\n", "# os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY', 'your-key-if-not-using-env')\n", "os.getenv('OPENAI_API_KEY')" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "6800\n", "[Document(metadata={'doc_type': 'research', 'source': 'usiu-knowledge-base/research/20250330031216_apply_now_admission_requirements.md'}, page_content='# Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/admission-requirements/'), Document(metadata={'doc_type': 'tuition', 'source': 
'usiu-knowledge-base/tuition/20250330203921_apply_now_admission_requirements.md'}, page_content='# Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/admission-requirements/'), Document(metadata={'doc_type': 'sports', 'source': 'usiu-knowledge-base/sports/20250330090137_apply_now_admission_requirements.md'}, page_content='# Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/admission-requirements/'), Document(metadata={'doc_type': 'admissions', 'source': 'usiu-knowledge-base/admissions/20250330003916_apply_now_admission_requirements.md'}, page_content='# Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/admission-requirements/'), Document(metadata={'doc_type': 'tuition', 'source': 'usiu-knowledge-base/tuition/20250330205539_apply_now_doctoral_admission_requirements.md'}, page_content='# Doctoral Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/doctoral-admission-requirements/'), Document(metadata={'doc_type': 'student_support', 'source': 'usiu-knowledge-base/student_support/20250330045635_apply_now_doctoral_admission_requirements.md'}, page_content='# Doctoral Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/doctoral-admission-requirements/'), Document(metadata={'doc_type': 'admissions', 'source': 'usiu-knowledge-base/admissions/20250330004404_apply_now_doctoral_admission_requirements.md'}, page_content='# Doctoral Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/doctoral-admission-requirements/'), Document(metadata={'doc_type': 'events', 'source': 'usiu-knowledge-base/events/20250330133137_apply_now_home.md'}, page_content='# USIU-Africa URL: https://www.usiu.ac.ke/apply-now/home/'), Document(metadata={'doc_type': 'resources', 'source': 'usiu-knowledge-base/resources/20250330124605_apply_now_home.md'}, page_content='# USIU-Africa URL: https://www.usiu.ac.ke/apply-now/home/'), Document(metadata={'doc_type': 'scholarships', 'source': 'usiu-knowledge-base/scholarships/20250331003106_apply_now_home.md'}, page_content='# USIU-Africa URL: https://www.usiu.ac.ke/apply-now/home/')]\n", "Retrieved Documents: 10\n", "Retrieved Document Content:\n", "# Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/admission-requirements/\n", "--------------------------------------------------\n", "Retrieved Document Content:\n", "# Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/admission-requirements/\n", "--------------------------------------------------\n", "Retrieved Document Content:\n", "# Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/admission-requirements/\n", "--------------------------------------------------\n", "Retrieved Document Content:\n", "# Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/admission-requirements/\n", "--------------------------------------------------\n", "Retrieved Document Content:\n", "# Doctoral Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/doctoral-admission-requirements/\n", "--------------------------------------------------\n", "Retrieved Document Content:\n", "# Doctoral Admission Requirements - USIU-Africa URL: https://www.usiu.ac.ke/apply-now/doctoral-admission-requirements/\n", "--------------------------------------------------\n", "Retrieved Document Content:\n", "# Doctoral Admission Requirements - USIU-Africa URL: 
https://www.usiu.ac.ke/apply-now/doctoral-admission-requirements/\n", "--------------------------------------------------\n", "Retrieved Document Content:\n", "# USIU-Africa URL: https://www.usiu.ac.ke/apply-now/home/\n", "--------------------------------------------------\n", "Retrieved Document Content:\n", "# USIU-Africa URL: https://www.usiu.ac.ke/apply-now/home/\n", "--------------------------------------------------\n", "Retrieved Document Content:\n", "# USIU-Africa URL: https://www.usiu.ac.ke/apply-now/home/\n", "--------------------------------------------------\n" ] } ], "source": [ "# Put the chunks of data into a vector store that associates a vector embedding with each chunk\n", "# Chroma is a popular open-source vector database built on SQLite\n", "\n", "# If you would rather use the free vector embeddings from HuggingFace sentence-transformers,\n", "# replace embeddings = OpenAIEmbeddings() with:\n", "# from langchain.embeddings import HuggingFaceEmbeddings\n", "# embeddings = HuggingFaceEmbeddings(model_name=\"sentence-transformers/all-MiniLM-L6-v2\")\n", "\n", "embeddings = OpenAIEmbeddings()\n", "\n", "db_name = \"./vector_services/usiu_vector_db\"\n", "\n", "# Load the persisted vector store (Chroma is already imported from langchain_chroma above)\n", "vectorstore = Chroma(persist_directory=db_name, embedding_function=embeddings)\n", "\n", "# Example query\n", "query = \"How do I get admitted to USIU-Africa?\"\n", "\n", "# Perform a similarity search to find the most relevant documents\n", "docs = vectorstore.similarity_search(query, k=10)\n", "print(docs)\n", "\n", "print(f\"Retrieved Documents: {len(docs)}\")\n", "# Print the retrieved document contents.\n", "for doc in docs:\n", " print(\"Retrieved Document Content:\")\n", " print(doc.page_content)\n", " print(\"-\" * 50)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from configs.config import GPT4O_MODEL as MODEL\n", "\n", "\n", "# create a new Chat with OpenAI\n", "llm = ChatOpenAI(temperature=0.7, model_name=MODEL)\n", "\n", "# Alternative - if you'd like to use Ollama locally, uncomment this line instead\n", "# llm = ChatOpenAI(temperature=0.7, model_name='llama3.2', base_url='http://localhost:11434/v1', api_key='ollama')\n", "\n", "# set up the conversation memory for the chat\n", "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n", "\n", "# the retriever is an abstraction over the VectorStore that will be used during RAG\n", "retriever = vectorstore.as_retriever()\n", "\n", "# putting it together: set up the conversation chain with the GPT-4o LLM, the vector store and memory\n", "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "result = conversation_chain.invoke({\"question\": query})\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# set up a new conversation memory for the chat\n", "memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)\n", "\n", "# putting it together: set up the conversation chain with the GPT-4o LLM, the vector store and memory\n", "conversation_chain = ConversationalRetrievalChain.from_llm(llm=llm, retriever=retriever, memory=memory)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Wrapping that in a
function\n", "\n", "def chat(question, history):\n", " result = conversation_chain.invoke({\"question\": question})\n", " return result[\"answer\"]" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/var/folders/x4/dd2xz8_d4fjbb7kzdq5sp8_40000gn/T/ipykernel_56308/3678441278.py:129: UserWarning: You have not specified a value for the `type` parameter. Defaulting to the 'tuples' format for chatbot messages, but this is deprecated and will be removed in a future version of Gradio. Please set type='messages' instead, which uses openai-style dictionaries with 'role' and 'content' keys.\n", " chatbot = gr.Chatbot(\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Loaded vector store with 0 documents from './usiu_vector_db'.\n", "* Running on local URL: http://127.0.0.1:7881\n", "\n", "To create a public link, set `share=True` in `launch()`.\n" ] }, { "data": { "text/html": [ "
" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import gradio as gr\n", "import os\n", "import time\n", "import openai # if you plan to use it for API calls\n", "\n", "# ---------------------------\n", "# Helper Functions\n", "# ---------------------------\n", "\n", "def read_file(file_obj):\n", " \"\"\"\n", " Reads and decodes the content of an uploaded file.\n", " \"\"\"\n", " file_obj.seek(0)\n", " content = file_obj.read()\n", " # Ensure the content is a string.\n", " return content.decode(\"utf-8\") if isinstance(content, bytes) else content\n", "\n", "# Dummy Global Retriever setup for RAG integration.\n", "# Replace or modify this section with your actual vectorstore and retriever.\n", "try:\n", " from vector_services.data_curator import DataCurator\n", " curator = DataCurator(knowledge_base_dir=\"../usiu-knowledge-base\")\n", " curator.load_vectorstore()\n", " GLOBAL_RETRIEVER = curator.get_retriever()\n", "except Exception as e:\n", " GLOBAL_RETRIEVER = None\n", "\n", "def process_chat(user_message, chat_history, file):\n", " \"\"\"\n", " Processes the user input and file upload:\n", " - If no message is provided but a file is uploaded, the filename (and its content) is used.\n", " - Inserts a header for a new conversation.\n", " - Optionally queries a retriever for additional context (RAG style).\n", " - For demonstration purposes, echoes back the user message.\n", " \n", " Returns:\n", " A tuple (chatbot_history, state_history) so that both the Chatbot display and the internal state are updated.\n", " \"\"\"\n", " # Handle the file case when there is no text input.\n", " if not user_message.strip() and file is not None:\n", " user_message = f\"Uploaded file: {os.path.basename(file.name)}\"\n", " try:\n", " file_content = read_file(file)\n", " except Exception as e:\n", " chat_history.append((\"Error\", f\"Error reading file: {str(e)}\"))\n", " return chat_history, chat_history\n", " # Append file content as additional context.\n", " user_message += f\"\\n\\n(File Content): {file_content}\"\n", " elif not user_message.strip():\n", " # If no text nor file provided, do nothing.\n", " return chat_history, chat_history\n", "\n", " # Add a header if this is the start of a new conversation.\n", " if not chat_history:\n", " date_str = time.strftime(\"%Y-%m-%d %H:%M\", time.localtime())\n", " chat_history.append((\"System\", f\"Conversation started on {date_str}\"))\n", "\n", " # Append the user's message.\n", " chat_history.append((\"User\", user_message))\n", "\n", " # --- RAG (Retrieval-Augmented Generation) Integration ---\n", " if \"GLOBAL_RETRIEVER\" in globals() and GLOBAL_RETRIEVER is not None:\n", " try:\n", " docs = GLOBAL_RETRIEVER.get_relevant_documents(user_message)\n", " if docs:\n", " # Concatenate retrieved document content as additional context.\n", " context = \"\\n\\n\".join(doc.page_content for doc in docs)\n", " chat_history.append((\"System\", f\"Additional Context:\\n{context}\"))\n", " except Exception as e:\n", " print(\"RAG retrieval failed:\", e)\n", "\n", " # --- Chat API Call (Dummy Response) ---\n", " # Here you would normally call your ChatCompletion (or Claude) API with streaming.\n", " # For demo purposes, we simply echo the user message.\n", " response = \"Echo: \" + user_message\n", " chat_history.append((\"Bot\", response))\n", " \n", " # Return the updated chat history for both the Chatbot display and 
internal state.\n", " return chat_history, chat_history\n", "\n", "def reset_chat():\n", " \"\"\"\n", " Resets the chat history.\n", " \"\"\"\n", " return []\n", "\n", "# ---------------------------\n", "# Custom CSS & JavaScript\n", "# ---------------------------\n", "\n", "css = \"\"\"\n", "/* Custom CSS for Chatbot styling */\n", "#chatbot {\n", " border: 2px solid #4CAF50;\n", " border-radius: 5px;\n", " padding: 10px;\n", " margin-bottom: 10px;\n", "}\n", "\"\"\"\n", "\n", "# Custom JavaScript to disable the send button when there is no text.\n", "# (Sketch: assumes the #chat_input and #send_btn element ids assigned below.)\n", "custom_js = \"\"\"\n", "<script>\n", "document.addEventListener('DOMContentLoaded', () => {\n", " const input = document.querySelector('#chat_input textarea');\n", " const btn = document.querySelector('#send_btn');\n", " if (!input || !btn) return;\n", " const sync = () => { btn.disabled = input.value.trim() === ''; };\n", " input.addEventListener('input', sync);\n", " sync();\n", "});\n", "</script>\n", "\"\"\"\n", "\n", "# ---------------------------\n", "# Gradio UI using Blocks\n", "# ---------------------------\n", "\n", "with gr.Blocks(css=css, title=\"Gradio Chat Interface Example\") as demo:\n", " gr.Markdown(\"

# Chat Interface Example

\")\n", " \n", " # Chat display using Chatbot component\n", " chatbot = gr.Chatbot(\n", " elem_id=\"chatbot\",\n", " show_copy_all_button=True,\n", " show_copy_button=True,\n", " show_share_button=True,\n", " allow_file_downloads=True,\n", " allow_tags=True,\n", " layout=\"panel\"\n", " )\n", " # This state holds the conversation history as a list of (role, message) tuples.\n", " state = gr.State([])\n", "\n", " with gr.Row():\n", " with gr.Column(scale=8):\n", " chat_input = gr.Textbox(\n", " placeholder=\"Type your message here...\",\n", " label=\"Your Message\",\n", " elem_id=\"chat_input\"\n", " )\n", " with gr.Column(scale=2):\n", " file_input = gr.File(label=\"Upload a file (optional)\")\n", " with gr.Column(scale=2):\n", " # The send button includes a chat icon (emoji).\n", " send_btn = gr.Button(\"Send 💬\", elem_id=\"send_btn\", variant=\"primary\")\n", " \n", " with gr.Row():\n", " new_chat_btn = gr.Button(\"New Chat\", variant=\"secondary\")\n", " \n", " # When the send button is clicked, process the message (and file if any) and update both the Chatbot and state.\n", " send_btn.click(\n", " fn=process_chat,\n", " inputs=[chat_input, state, file_input],\n", " outputs=[chatbot, state]\n", " )\n", " # Reset the conversation history when \"New Chat\" is clicked.\n", " new_chat_btn.click(\n", " fn=reset_chat,\n", " inputs=[],\n", " outputs=state\n", " )\n", " \n", " # Inject custom JavaScript.\n", " gr.HTML(custom_js)\n", "\n", "# Launch the interface.\n", "demo.launch()\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "ename": "ImportError", "evalue": "cannot import name 'HumeVoiceClient' from 'hume' (/opt/anaconda3/envs/llms/lib/python3.11/site-packages/hume/__init__.py)", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[1], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mhume\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m HumeVoiceClient, MicrophoneInterface\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mdotenv\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m load_dotenv\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mos\u001b[39;00m\n", "\u001b[0;31mImportError\u001b[0m: cannot import name 'HumeVoiceClient' from 'hume' (/opt/anaconda3/envs/llms/lib/python3.11/site-packages/hume/__init__.py)" ] } ], "source": [ "from hume import HumeVoiceClient, MicrophoneInterface\n", "from dotenv import load_dotenv\n", "import os\n", "\n", "\n", "load_dotenv()\n", "# avoid hard coding your API key, retrieve from environment variables\n", "HUME_API_KEY = os.getenv(\"HUMEAI_API_KEY\")\n", "HUME_CONFIG_ID = os.getenv(\"HUMEAI_CONFIG_ID\")\n", "# Connect and authenticate with Hume\n", "client = HumeVoiceClient(HUME_API_KEY)\n", "# establish a connection with EVI with your configuration by passing\n", "# the config_id as an argument to the connect method\n", "async with client.connect(config_id=HUME_CONFIG_ID) as socket:\n", " await MicrophoneInterface.start(socket)\n" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "import asyncio\n", "import base64\n", "import datetime\n", "import os\n", "from 
dotenv import load_dotenv\n", "from hume.client import AsyncHumeClient\n", "from hume.empathic_voice.chat.socket_client import ChatConnectOptions, ChatWebsocketConnection\n", "from hume.empathic_voice.chat.types import SubscribeEvent\n", "from hume.empathic_voice.types import UserInput\n", "from hume.core.api_error import ApiError\n", "from hume import MicrophoneInterface, Stream" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "class WebSocketHandler:\n", " \"\"\"Interface for containing the EVI WebSocket and associated socket handling behavior.\"\"\"\n", "\n", " def __init__(self):\n", " \"\"\"Construct the WebSocketHandler, initially assigning the socket to None and the byte stream to a new Stream object.\"\"\"\n", " self.socket = None\n", " self.byte_strs = Stream.new()\n", "\n", " def set_socket(self, socket: ChatWebsocketConnection):\n", " \"\"\"Set the socket.\"\"\"\n", " self.socket = socket\n", "\n", " async def on_open(self):\n", " \"\"\"Logic invoked when the WebSocket connection is opened.\"\"\"\n", " print(\"WebSocket connection opened.\")\n", "\n", " async def on_message(self, message: SubscribeEvent):\n", " \"\"\"Callback function to handle a WebSocket message event.\n", " \n", " This asynchronous method decodes the message, determines its type, and\n", " handles it accordingly. Depending on the type of message, it\n", " might log metadata, handle user or assistant messages, process\n", " audio data, raise an error if the message type is \"error\", and more.\n", "\n", " See the full list of \"Receive\" messages in the API Reference.\n", " \"\"\"\n", "\n", " if message.type == \"chat_metadata\":\n", " chat_id = message.chat_id\n", " chat_group_id = message.chat_group_id\n", " # ...\n", " elif message.type in [\"user_message\", \"assistant_message\"]:\n", " role = message.message.role.upper()\n", " message_text = message.message.content\n", " # ...\n", " elif message.type == \"audio_output\":\n", " message_str: str = message.data\n", " message_bytes = base64.b64decode(message_str.encode(\"utf-8\"))\n", " await self.byte_strs.put(message_bytes)\n", " return\n", " elif message.type == \"error\":\n", " error_message = message.message\n", " error_code = message.code\n", " raise ApiError(f\"Error ({error_code}): {error_message}\")\n", " \n", " # Print timestamp and message\n", " # ...\n", " \n", " async def on_close(self):\n", " \"\"\"Logic invoked when the WebSocket connection is closed.\"\"\"\n", " print(\"WebSocket connection closed.\")\n", "\n", " async def on_error(self, error):\n", " \"\"\"Logic invoked when an error occurs in the WebSocket connection.\"\"\"\n", " print(f\"Error: {error}\")\n" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "async def main() -> None:\n", " # Retrieve any environment variables stored in the .env file\n", " load_dotenv()\n", "\n", " # Retrieve the API key, Secret key, and EVI config id from the environment variables\n", " HUMEAI_API_KEY = os.getenv(\"HUMEAI_API_KEY\")\n", " HUMEAI_SECRET_KEY = os.getenv(\"HUMEAI_SECRET_KEY\")\n", " HUMEAI_CONFIG_ID = os.getenv(\"HUMEAI_CONFIG_ID\")\n", "\n", " # Initialize the asynchronous client, authenticating with your API key\n", " client = AsyncHumeClient(api_key=HUMEAI_API_KEY)\n", "\n", " # Define options for the WebSocket connection, such as an EVI config id and a secret key for token authentication\n", " options = ChatConnectOptions(config_id=HUMEAI_CONFIG_ID, secret_key=HUMEAI_SECRET_KEY)\n", " \n", " # ...\n" ] }, { 
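"cell_type": "markdown", "metadata": {}, "source": [ "A minimal completion sketch for `main()` above (the name `main_sketch` is hypothetical, and it assumes `client`, `options`, and `WebSocketHandler` from the preceding cells are in scope at module level): open the EVI socket with the handler's callbacks, then stream microphone audio over it. The same calls appear in the working cells further down.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Minimal sketch, not a definitive implementation: assumes `client`, `options`,\n", "# and WebSocketHandler from the cells above are available at module level.\n", "async def main_sketch() -> None:\n", "    websocket_handler = WebSocketHandler()\n", "    # Open the WebSocket connection with the handler's callbacks\n", "    async with client.empathic_voice.chat.connect_with_callbacks(\n", "        options=options,\n", "        on_open=websocket_handler.on_open,\n", "        on_message=websocket_handler.on_message,\n", "        on_close=websocket_handler.on_close,\n", "        on_error=websocket_handler.on_error\n", "    ) as socket:\n", "        websocket_handler.set_socket(socket)\n", "        # Stream microphone input and play EVI's audio replies from the byte stream\n", "        await MicrophoneInterface.start(socket, byte_stream=websocket_handler.byte_strs)\n" ] }, {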
"cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "ename": "ModuleNotFoundError", "evalue": "No module named 'empathic_client'", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[8], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;21;01mempathic_client\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m EmpathicClient\n\u001b[1;32m 2\u001b[0m \u001b[38;5;66;03m# from empathic import EmpathicClient\u001b[39;00m\n\u001b[1;32m 5\u001b[0m client \u001b[38;5;241m=\u001b[39m EmpathicClient(api_key\u001b[38;5;241m=\u001b[39mHUME_API_KEY)\n", "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'empathic_client'" ] } ], "source": [ "from empathic_client import EmpathicClient\n", "# from empathic import EmpathicClient\n", "\n", "\n", "client = EmpathicClient(api_key=HUME_API_KEY)\n", "\n", "async def main() -> None:\n", " \n", "# Retrieve the API key, Secret key, and EVI config id from the environment variables\n", " HUMEAI_API_KEY = os.getenv(\"HUMEAI_API_KEY\")\n", " HUMEAI_SECRET_KEY = os.getenv(\"HUMEAI_SECRET_KEY\")\n", " HUMEAI_CONFIG_ID = os.getenv(\"HUMEAI_CONFIG_ID\")\n", "\n", "\n", " # Define options for the WebSocket connection, such as an EVI config id and a secret key for token authentication\n", " options = ChatConnectOptions(config_id=HUME_CONFIG_ID, secret_key=HUMEAI_SECRET_KEY)\n", "\n", " # Instantiate the WebSocketHandler\n", " websocket_handler = WebSocketHandler()\n", "\n", " # Open the WebSocket connection with the configuration options and the handler's functions\n", "async with client.empathic_voice.chat.connect_with_callbacks(\n", " options=options,\n", " on_open=websocket_handler.on_open,\n", " on_message=websocket_handler.on_message,\n", " on_close=websocket_handler.on_close,\n", " on_error=websocket_handler.on_error\n", ") as socket:\n", " \n", " # Set the socket instance in the handler\n", " websocket_handler.set_socket(socket)\n", " # ...\n" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "async def main() -> None:\n", " # Open the WebSocket connection with the configuration options and the handler's functions\n", " async with client.empathic_voice.chat.connect_with_callbacks(...) 
as socket:\n", " # Set the socket instance in the handler\n", " websocket_handler.set_socket(socket)\n", "\n", " # Create an asynchronous task to continuously detect and process input from the microphone, as well as play audio\n", " microphone_task = asyncio.create_task(\n", " MicrophoneInterface.start(\n", " socket,\n", " byte_stream=websocket_handler.byte_strs\n", " )\n", " )\n", " \n", " # Await the microphone task\n", " await microphone_task\n", "\n" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "ename": "NameError", "evalue": "name 'websocket_handler' is not defined", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[7], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mwebsocket_handler\u001b[49m\u001b[38;5;241m.\u001b[39mset_socket(socket)\n\u001b[1;32m 3\u001b[0m \u001b[38;5;66;03m# Specify device 4 in MicrophoneInterface\u001b[39;00m\n\u001b[1;32m 4\u001b[0m MicrophoneInterface\u001b[38;5;241m.\u001b[39mstart(\n\u001b[1;32m 5\u001b[0m socket,\n\u001b[1;32m 6\u001b[0m \u001b[38;5;66;03m# device=4,\u001b[39;00m\n\u001b[1;32m 7\u001b[0m allow_user_interrupt\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m,\n\u001b[1;32m 8\u001b[0m byte_stream\u001b[38;5;241m=\u001b[39mwebsocket_handler\u001b[38;5;241m.\u001b[39mbyte_strs\n\u001b[1;32m 9\u001b[0m )\n", "\u001b[0;31mNameError\u001b[0m: name 'websocket_handler' is not defined" ] } ], "source": [ "websocket_handler.set_socket(socket)\n", "\n", "# Specify device 4 in MicrophoneInterface\n", "MicrophoneInterface.start(\n", " socket,\n", "# device=4,\n", " allow_user_interrupt=True,\n", " byte_stream=websocket_handler.byte_strs\n", ")\n" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "# Retrieve any environment variables stored in the .env file\n", "load_dotenv()\n", "\n", "# Retrieve the API key, Secret key, and EVI config id from the environment variables\n", "HUMEAI_API_KEY = os.getenv(\"HUMEAI_API_KEY\")\n", "HUMEAI_SECRET_KEY = os.getenv(\"HUMEAI_SECRET_KEY\")\n", "HUMEAI_CONFIG_ID = os.getenv(\"HUMEAI_CONFIG_ID\")\n", "\n", "import asyncio\n", "\n", "from hume.client import AsyncHumeClient\n", "\n", "client = AsyncHumeClient(api_key=HUMEAI_API_KEY)\n", "\n", "async def main() -> None:\n", " await client.empathic_voice.configs.list_configs()\n", "\n", "import nest_asyncio\n", "nest_asyncio.apply()\n", "\n", "asyncio.run(main())\n", "# await main()" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/opt/anaconda3/envs/llms/lib/python3.11/site-packages/pygments/regexopt.py:77: RuntimeWarning: coroutine 'main' was never awaited\n", " '|'.join(regex_opt_inner(list(group[1]), '')\n", "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n" ] }, { "ename": "ImportError", "evalue": "cannot import name 'HumeVoiceClient' from 'hume' (/opt/anaconda3/envs/llms/lib/python3.11/site-packages/hume/__init__.py)", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[14], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m 
\u001b[39m\u001b[38;5;21;01mhume\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m HumeVoiceClient, MicrophoneInterface\n\u001b[1;32m 3\u001b[0m \u001b[38;5;66;03m# Connect and authenticate with Hume\u001b[39;00m\n\u001b[1;32m 4\u001b[0m client \u001b[38;5;241m=\u001b[39m HumeVoiceClient(HUMEAI_API_KEY)\n", "\u001b[0;31mImportError\u001b[0m: cannot import name 'HumeVoiceClient' from 'hume' (/opt/anaconda3/envs/llms/lib/python3.11/site-packages/hume/__init__.py)" ] } ], "source": [ "from hume import HumeVoiceClient, MicrophoneInterface\n", "\n", "# Connect and authenticate with Hume\n", "client = HumeVoiceClient(HUMEAI_API_KEY)\n", "# establish a connection with EVI with your configuration by passing\n", "# the config_id as an argument to the connect method\n", "async with client.connect(config_id=HUMEAI_CONFIG_ID) as socket:\n", " await MicrophoneInterface.start(socket)\n" ] }, { "cell_type": "code", "execution_count": 22, "metadata": {}, "outputs": [], "source": [ "import asyncio\n", "import base64\n", "import datetime\n", "import os\n", "from dotenv import load_dotenv\n", "import nest_asyncio\n", "from hume import MicrophoneInterface\n", "from hume.client import AsyncHumeClient\n", "from hume.empathic_voice.chat.socket_client import ChatConnectOptions, ChatWebsocketConnection\n", "from hume.empathic_voice.types import UserInput\n", "from hume.core.api_error import ApiError\n", "\n", "# Apply nest_asyncio to make asyncio work in Jupyter\n", "nest_asyncio.apply()\n", "\n", "class WebSocketHandler:\n", " \"\"\"Handles WebSocket events for EVI chat.\"\"\"\n", " \n", " def __init__(self):\n", " \"\"\"Initialize the WebSocket handler.\"\"\"\n", " self.socket = None\n", " self.byte_strs = asyncio.Queue()\n", " self.is_speaking = False\n", " self.transcript = \"\"\n", " self.conversation_history = []\n", " self.audio_playback_task = None\n", " \n", " def set_socket(self, socket: ChatWebsocketConnection):\n", " \"\"\"Set the WebSocket connection.\"\"\"\n", " self.socket = socket\n", " \n", " async def on_open(self):\n", " \"\"\"Handle WebSocket connection open event.\"\"\"\n", " print(\"WebSocket connection opened.\")\n", " # Start audio playback task\n", " self.audio_playback_task = asyncio.create_task(self.handle_audio_playback())\n", " \n", " async def handle_audio_playback(self):\n", " \"\"\"Process audio from the queue and play it.\"\"\"\n", " try:\n", " while True:\n", " # Get audio bytes from the queue\n", " audio_bytes = await self.byte_strs.get()\n", " # Play the audio (simplified for example)\n", " print(\"Playing audio chunk...\")\n", " # In a real implementation, you would use a library like sounddevice to play the audio\n", " \n", " # Mark task as done\n", " self.byte_strs.task_done()\n", " except Exception as e:\n", " print(f\"Audio playback error: {e}\")\n", " \n", " async def on_message(self, message):\n", " \"\"\"\n", " Handle incoming WebSocket messages.\n", " \n", " Args:\n", " message: The WebSocket message data.\n", " \"\"\"\n", " try:\n", " # Debug the message type\n", " print(f\"Received message type: {type(message).__name__}\")\n", " \n", " # Handle different message types based on their attributes\n", " if hasattr(message, 'transcript') and message.transcript:\n", " self.transcript = message.transcript\n", " print(f\"Transcript: {self.transcript}\")\n", " \n", " elif hasattr(message, 'audio'):\n", " if not self.is_speaking and hasattr(message, 'text'):\n", " self.is_speaking = True\n", " print(f\"Assistant: {message.text}\")\n", " 
print(\"Assistant is speaking...\")\n", " \n", " # Queue the audio bytes for playback\n", " if message.audio:\n", " await self.byte_strs.put(message.audio)\n", " \n", " elif hasattr(message, 'speaking_complete') and message.speaking_complete:\n", " self.is_speaking = False\n", " print(\"Assistant finished speaking.\")\n", " \n", " # Add the completed exchange to conversation history\n", " if hasattr(message, 'text') and self.transcript:\n", " self.conversation_history.append({\"user\": self.transcript, \"assistant\": message.text})\n", " self.transcript = \"\"\n", " \n", " elif hasattr(message, 'error_message'):\n", " print(f\"Error: {message.error_message}\")\n", " \n", " except Exception as e:\n", " print(f\"Message handling error: {e}\")\n", " \n", " async def on_close(self):\n", " \"\"\"Handle WebSocket connection close event.\"\"\"\n", " print(\"WebSocket connection closed.\")\n", " if self.audio_playback_task:\n", " self.audio_playback_task.cancel()\n", " try:\n", " await self.audio_playback_task\n", " except asyncio.CancelledError:\n", " pass\n", " \n", " async def on_error(self, error: Exception):\n", " \"\"\"\n", " Handle WebSocket error event.\n", " \n", " Args:\n", " error: The error that occurred.\n", " \"\"\"\n", " print(f\"WebSocket error: {error}\")\n", " \n", " async def send_text_message(self, text: str):\n", " \"\"\"\n", " Send a text message to EVI.\n", " \n", " Args:\n", " text: The text to send.\n", " \"\"\"\n", " if self.socket:\n", " user_input = UserInput(text=text)\n", " await self.socket.send_user_input(user_input)\n", " print(f\"Sent text message: {text}\")\n", " else:\n", " print(\"WebSocket not connected.\")\n", "\n", "async def main():\n", " \"\"\"Main function to run the EVI chat application.\"\"\"\n", " # Retrieve any environment variables stored in the .env file\n", " load_dotenv()\n", " \n", " # Retrieve the API credentials from environment variables\n", " HUME_API_KEY = os.getenv(\"HUMEAI_API_KEY\")\n", " HUME_SECRET_KEY = os.getenv(\"HUMEAI_SECRET_KEY\")\n", " HUME_CONFIG_ID = os.getenv(\"HUMEAI_CONFIG_ID\")\n", " \n", " # Validate credentials are available\n", " if not all([HUME_API_KEY, HUME_SECRET_KEY, HUME_CONFIG_ID]):\n", " raise ValueError(\"Missing required environment variables. 
Please set HUMEAI_API_KEY, HUMEAI_SECRET_KEY, and HUMEAI_CONFIG_ID.\")\n", " \n", " # Initialize the asynchronous client\n", " client = AsyncHumeClient(api_key=HUME_API_KEY)\n", " \n", " # Define options for the WebSocket connection\n", " options = ChatConnectOptions(config_id=HUME_CONFIG_ID, secret_key=HUME_SECRET_KEY)\n", " \n", " # Instantiate the WebSocketHandler\n", " websocket_handler = WebSocketHandler()\n", " \n", " try:\n", " # Open the WebSocket connection with the handler's callbacks\n", " async with client.empathic_voice.chat.connect_with_callbacks(\n", " options=options,\n", " on_open=websocket_handler.on_open,\n", " on_message=websocket_handler.on_message,\n", " on_close=websocket_handler.on_close,\n", " on_error=websocket_handler.on_error\n", " ) as socket:\n", " # Set the socket instance in the handler\n", " websocket_handler.set_socket(socket)\n", " \n", " # Create an asynchronous task for microphone handling\n", " # Check the latest implementation of MicrophoneInterface\n", " try:\n", " print(\"Setting up microphone interface...\")\n", " # Try using just the socket\n", " mic_interface = MicrophoneInterface(socket)\n", " microphone_task = asyncio.create_task(mic_interface.start())\n", " print(\"Microphone interface started successfully\")\n", " except TypeError as e:\n", " print(f\"MicrophoneInterface error: {e}\")\n", " # Try the alternative approach with explicit byte_stream\n", " print(\"Trying alternative microphone setup...\")\n", " microphone_task = asyncio.create_task(\n", " MicrophoneInterface.start(\n", " socket, \n", " byte_stream=websocket_handler.byte_strs\n", " )\n", " )\n", " \n", " # For testing, send an initial text message\n", " await websocket_handler.send_text_message(\"Hello, I'm testing this voice interface.\")\n", " \n", " # Wait for user input to exit\n", " try:\n", " print(\"Press Ctrl+C to exit...\")\n", " await asyncio.Future() # Run indefinitely until interrupted\n", " except asyncio.CancelledError:\n", " pass\n", " finally:\n", " # Cancel the microphone task\n", " microphone_task.cancel()\n", " try:\n", " await microphone_task\n", " except asyncio.CancelledError:\n", " pass\n", " \n", " except ApiError as e:\n", " print(f\"API Error: {e}\")\n", " except Exception as e:\n", " print(f\"Unexpected error: {e}\")\n", "\n", "# Let's create a simpler, text-only version to test the basic functionality\n", "async def text_only_chat():\n", " \"\"\"Run EVI in text-only mode for testing.\"\"\"\n", " load_dotenv()\n", " \n", " HUME_API_KEY = os.getenv(\"HUMEAI_API_KEY\")\n", " HUME_SECRET_KEY = os.getenv(\"HUMEAI_SECRET_KEY\")\n", " HUME_CONFIG_ID = os.getenv(\"HUMEAI_CONFIG_ID\")\n", " \n", " if not all([HUME_API_KEY, HUME_SECRET_KEY, HUME_CONFIG_ID]):\n", " raise ValueError(\"Missing required environment variables\")\n", " \n", " client = AsyncHumeClient(api_key=HUME_API_KEY)\n", " options = ChatConnectOptions(config_id=HUME_CONFIG_ID, secret_key=HUME_SECRET_KEY)\n", " websocket_handler = WebSocketHandler()\n", " \n", " async with client.empathic_voice.chat.connect_with_callbacks(\n", " options=options,\n", " on_open=websocket_handler.on_open,\n", " on_message=websocket_handler.on_message,\n", " on_close=websocket_handler.on_close,\n", " on_error=websocket_handler.on_error\n", " ) as socket:\n", " websocket_handler.set_socket(socket)\n", " \n", " # Simulate conversation with text input\n", " messages = [\n", " \"Hello, how are you today?\",\n", " \"Tell me about yourself\",\n", " \"What can you help me with?\"\n", " ]\n", " \n", " for msg in messages:\n", " 
print(f\"\\nSending message: {msg}\")\n", " await websocket_handler.send_text_message(msg)\n", " # Wait for response processing\n", " await asyncio.sleep(5)\n", " \n", " print(\"\\nText-only chat completed.\")\n", "\n", "def run_evi_chat():\n", " \"\"\"Run the EVI chat application.\"\"\"\n", " try:\n", " asyncio.run(main())\n", " except KeyboardInterrupt:\n", " print(\"Application terminated by user.\")\n", " except Exception as e:\n", " print(f\"Error running application: {e}\")\n", "\n", "def run_text_only():\n", " \"\"\"Run text-only chat in Jupyter.\"\"\"\n", " try:\n", " asyncio.run(text_only_chat())\n", " except KeyboardInterrupt:\n", " print(\"Text-only chat terminated by user.\")\n", " except Exception as e:\n", " print(f\"Error running text-only chat: {e}\")" ] }, { "cell_type": "code", "execution_count": 23, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "WebSocket connection opened.\n", "Setting up microphone interface...\n", "MicrophoneInterface error: MicrophoneInterface.__init__() takes 1 positional argument but 2 were given\n", "Trying alternative microphone setup...\n", "Sent text message: Hello, I'm testing this voice interface.\n", "Press Ctrl+C to exit...\n", "Configuring socket with microphone settings...\n", "Microphone connected. Say something!\n", "Received message type: ChatMetadata\n", "Received message type: UserMessage\n", "Received message type: WebSocketError\n", "Received message type: AssistantMessage\n", "Received message type: AudioOutput\n", "Received message type: AudioOutput\n", "Received message type: AudioOutput\n", "Received message type: AssistantMessage\n", "Received message type: AudioOutput\n", "Received message type: AudioOutput\n", "Received message type: AudioOutput\n", "Received message type: AudioOutput\n", "Received message type: AssistantEnd\n", "Received message type: AssistantEnd\n", "WebSocket error: 'async for' requires an object with __aiter__ method, got Queue\n", "WebSocket connection closed.\n", "Unexpected error: 'async for' requires an object with __aiter__ method, got Queue\n", "Application terminated by user.\n" ] } ], "source": [ "# Run the EVI chat application\n", "run_evi_chat()" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['AsyncHumeClient', 'HumeClient', 'HumeClientEnvironment', 'MicrophoneInterface', 'Stream', '__all__', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__', '__version__', 'base_client', 'client', 'core', 'empathic_voice', 'environment', 'expression_measurement', 'tts', 'version']\n" ] } ], "source": [ "import hume\n", "print(dir(hume))\n" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getstate__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_client_wrapper', 'chat', 'chat_groups', 'chats', 'configs', 'custom_voices', 'prompts', 'tools']\n" ] } ], "source": [ "from hume import AsyncHumeClient\n", "client = AsyncHumeClient(api_key=\"YOUR_API_KEY\")\n", "print(dir(client.empathic_voice))\n" ] }, { "cell_type": "code", 
"execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['DEFAULT_MAX_PAYLOAD_SIZE_BYTES', '__annotations__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getstate__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_construct_ws_uri', '_fetch_access_token', '_process_connection', '_wrap_on_error', '_wrap_on_message', '_wrap_on_open_close', 'client_wrapper', 'connect', 'connect_with_callbacks']\n" ] } ], "source": [ "print(dir(client.empathic_voice.chat))\n" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "ename": "ApiError", "evalue": "status_code: 404, body: {'timestamp': '2025-04-12T06:57:11.572+00:00', 'status': 404, 'error': 'Not Found', 'message': 'Either chat 470a49f6-1dec-4afe-8b61-035d3b2d63b0 does not exist or user (userId=1171ea7d-ebb0-4cf1-ab55-c42e3bfc140e) is not authorized to access it.', 'path': '/chats/470a49f6-1dec-4afe-8b61-035d3b2d63b0/audio'}", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mApiError\u001b[0m Traceback (most recent call last)", "Cell \u001b[0;32mIn[13], line 14\u001b[0m\n\u001b[1;32m 9\u001b[0m HUME_CONFIG_ID \u001b[38;5;241m=\u001b[39m os\u001b[38;5;241m.\u001b[39mgetenv(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mHUMEAI_CONFIG_ID\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 11\u001b[0m client \u001b[38;5;241m=\u001b[39m HumeClient(\n\u001b[1;32m 12\u001b[0m api_key\u001b[38;5;241m=\u001b[39mHUME_API_KEY,\n\u001b[1;32m 13\u001b[0m )\n\u001b[0;32m---> 14\u001b[0m \u001b[43mclient\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mempathic_voice\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mchats\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_audio\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 15\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mid\u001b[39;49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43m470a49f6-1dec-4afe-8b61-035d3b2d63b0\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 16\u001b[0m \u001b[43m)\u001b[49m\n", "File \u001b[0;32m/opt/anaconda3/envs/llms/lib/python3.11/site-packages/hume/empathic_voice/chats/client.py:287\u001b[0m, in \u001b[0;36mChatsClient.get_audio\u001b[0;34m(self, id, request_options)\u001b[0m\n\u001b[1;32m 285\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m JSONDecodeError:\n\u001b[1;32m 286\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ApiError(status_code\u001b[38;5;241m=\u001b[39m_response\u001b[38;5;241m.\u001b[39mstatus_code, body\u001b[38;5;241m=\u001b[39m_response\u001b[38;5;241m.\u001b[39mtext)\n\u001b[0;32m--> 287\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ApiError(status_code\u001b[38;5;241m=\u001b[39m_response\u001b[38;5;241m.\u001b[39mstatus_code, body\u001b[38;5;241m=\u001b[39m_response_json)\n", "\u001b[0;31mApiError\u001b[0m: status_code: 404, body: {'timestamp': '2025-04-12T06:57:11.572+00:00', 'status': 404, 'error': 'Not Found', 'message': 'Either chat 470a49f6-1dec-4afe-8b61-035d3b2d63b0 does not exist or user (userId=1171ea7d-ebb0-4cf1-ab55-c42e3bfc140e) is not authorized to access it.', 'path': '/chats/470a49f6-1dec-4afe-8b61-035d3b2d63b0/audio'}" ] } ], "source": [ 
"from hume import HumeClient\n", "from dotenv import load_dotenv\n", "import os\n", "\n", "load_dotenv()\n", " \n", "HUME_API_KEY = os.getenv(\"HUMEAI_API_KEY\")\n", "HUME_SECRET_KEY = os.getenv(\"HUMEAI_SECRET_KEY\")\n", "HUME_CONFIG_ID = os.getenv(\"HUMEAI_CONFIG_ID\")\n", "\n", "client = HumeClient(\n", " api_key=HUME_API_KEY,\n", ")\n", "client.empathic_voice.chats.get_audio(\n", " id=\"470a49f6-1dec-4afe-8b61-035d3b2d63b0\",\n", ")\n" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "import asyncio\n", "\n", "from hume.client import AsyncHumeClient\n", "\n", "client = AsyncHumeClient(api_key=HUME_API_KEY)\n", "\n", "async def main() -> None:\n", " await client.empathic_voice.configs.list_configs()\n", "\n", "import nest_asyncio\n", "nest_asyncio.apply()\n", "\n", "asyncio.run(main())" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from hume import StreamDataModels\n", "\n", "client = AsyncHumeClient(api_key=os.getenv(\"HUMEAI_API_KEY\"))\n", "\n", "async with client.expression_measurement.stream.connect(\n", " options={\"config\": StreamDataModels(...)}\n", ") as hume_socket:\n", " print(await hume_socket.get_job_details())" ] } ], "metadata": { "kernelspec": { "display_name": "llms", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.11" } }, "nbformat": 4, "nbformat_minor": 2 }