philincloud committed
Commit 55b6c13 · verified · 1 parent: d97a423

Update langgraph_agent.py

Files changed (1)
1. langgraph_agent.py  +4 −20
langgraph_agent.py CHANGED
@@ -159,23 +159,6 @@ def describe_image(image_path: str) -> Dict[str, str]:
     except Exception as e:
         return {"error": f"Error describing image {image_path}: {str(e)}"}
 
-@tool
-def transcribe_audio(audio_path: str) -> Dict[str, str]:
-    """
-    Transcribes an audio file (e.g., MP3) to text using an automatic speech recognition model
-    from the Hugging Face Inference API. Requires HF_API_TOKEN environment variable to be set.
-    """
-    if not HF_INFERENCE_CLIENT:
-        return {"error": "Hugging Face API token not configured for audio transcription. Cannot use this tool."}
-    try:
-        with open(audio_path, "rb") as f:
-            audio_bytes = f.read()
-        transcription = HF_INFERENCE_CLIENT.automatic_speech_recognition(audio_bytes)
-        return {"audio_transcription": transcription, "audio_path": audio_path}
-    except FileNotFoundError:
-        return {"error": f"Audio file not found: {audio_path}. Please ensure the file exists."}
-    except Exception as e:
-        return {"error": f"Error transcribing audio {audio_path}: {str(e)}"}
 
 
 API_KEY = os.getenv("GEMINI_API_KEY")
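For reference, the removed transcribe_audio tool wrapped huggingface_hub's InferenceClient. Below is a minimal standalone sketch of that ASR call; it is not code from this repository, and the handling of the result's .text attribute is an assumption about recent huggingface_hub versions.

# Minimal sketch of the Hugging Face ASR call the removed tool relied on.
# Assumes huggingface_hub is installed and HF_API_TOKEN is set.
import os
from huggingface_hub import InferenceClient

client = InferenceClient(token=os.getenv("HF_API_TOKEN"))

def transcribe(audio_path: str) -> str:
    with open(audio_path, "rb") as f:
        audio_bytes = f.read()
    result = client.automatic_speech_recognition(audio_bytes)
    # Recent huggingface_hub versions return an output object with a .text
    # attribute; older versions returned the transcription directly.
    return getattr(result, "text", result)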
@@ -189,7 +172,6 @@ tools = [
     read_file_content,
     python_interpreter,
     describe_image,
-    transcribe_audio,
 ]
 
 
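Each entry in this tools list is a plain function decorated with LangChain's @tool, the same pattern visible on the removed transcribe_audio above. A minimal sketch of that pattern, using a hypothetical word_count tool rather than one of the repository's real tools:

# Illustrative only: a hypothetical tool following the same @tool pattern
# as read_file_content, python_interpreter, and describe_image.
from typing import Dict
from langchain_core.tools import tool

@tool
def word_count(text: str) -> Dict[str, int]:
    """Counts the words in a piece of text."""
    return {"word_count": len(text.split())}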
@@ -222,8 +204,10 @@ def build_graph(provider: str = "gemini"):
     llm_with_tools = llm.bind_tools(tools)
 
     def assistant(state: MessagesState):
-        messages_to_send = [sys_msg] + state["messages"]
-        return {"messages": [llm_with_tools.invoke(messages_to_send)]}
+        messages_to_send = [sys_msg] + state["messages"]
+        llm_response = llm_with_tools.invoke(messages_to_send)
+        print(f"LLM Raw Response: {llm_response}")  # Add this line
+        return {"messages": [llm_response]}
 
     builder = StateGraph(MessagesState)
     builder.add_node("assistant", assistant)
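The hunk above shows only the first builder call. Below is a minimal sketch of how such an assistant node is commonly wired into a LangGraph tool-calling loop; the ToolNode, the tools_condition edges, and the compile() call are assumptions about the rest of build_graph, not lines from this commit.

# Sketch of a typical LangGraph tool-calling loop around the modified assistant
# node. `assistant` and `tools` are the objects defined earlier in
# langgraph_agent.py; the wiring beyond add_node("assistant", ...) is assumed.
from langgraph.graph import StateGraph, MessagesState, START
from langgraph.prebuilt import ToolNode, tools_condition

builder = StateGraph(MessagesState)
builder.add_node("assistant", assistant)      # LLM node shown in the diff above
builder.add_node("tools", ToolNode(tools))    # executes any tool calls the LLM emits
builder.add_edge(START, "assistant")
builder.add_conditional_edges("assistant", tools_condition)  # route to "tools" or end
builder.add_edge("tools", "assistant")        # feed tool results back to the LLM
graph = builder.compile()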