omarequalmars committed
Commit · 7b71bd5 · 1 Parent(s): a1c1d9a
Added wikipedia search
Files changed:
- requirements.txt +3 -0
- test_wikipedia.py +17 -0
- tools/__pycache__/langchain_tools.cpython-313.pyc +0 -0
- tools/__pycache__/wikipedia_tools.cpython-313.pyc +0 -0
- tools/__pycache__/youtube_tools.cpython-313.pyc +0 -0
- tools/langchain_tools.py +46 -0
- tools/wikipedia_tools.py +139 -0
- tools/youtube_tools.py +294 -0
requirements.txt CHANGED
@@ -12,3 +12,6 @@ pandas
 gradio[oauth]
 openpyxl
 xlrd
+llama-index-tools-wikipedia
+llama-index-core
+wikipedia-api
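These three packages back the optional import in tools/wikipedia_tools.py below. A quick post-install sanity check, as a sketch (assumes the usual import names for these packages — wikipedia-api imports as wikipediaapi):

# sanity_check_deps.py — hypothetical, not part of the commit
import llama_index.core
from llama_index.tools.wikipedia import WikipediaToolSpec
import wikipediaapi  # provided by the wikipedia-api package

print("wikipedia tooling importable:", WikipediaToolSpec.__name__)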
test_wikipedia.py ADDED
@@ -0,0 +1,17 @@
+# test_wikipedia.py
+from tools.wikipedia_tools import search_wikipedia, get_wikipedia_page, wikipedia_summary
+
+# Test search
+print("=== Testing Wikipedia Search ===")
+result = search_wikipedia("Mercedes Sosa discography")
+print(f"Search result: {result[:200]}...")
+
+# Test specific page
+print("\n=== Testing Wikipedia Page Retrieval ===")
+result = get_wikipedia_page("Mercedes Sosa discography")
+print(f"Page result: {result[:200]}...")
+
+# Test summary
+print("\n=== Testing Wikipedia Summary ===")
+result = wikipedia_summary("Mercedes Sosa discography")
+print(f"Summary result: {result[:200]}...")
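For CI, the same smoke test could be phrased as assertions rather than prints — a minimal pytest-style sketch, assuming the module layout above and that the helpers return error strings rather than raising:

# test_wikipedia_asserts.py — hypothetical variant of the smoke test above
from tools.wikipedia_tools import search_wikipedia, get_wikipedia_page, wikipedia_summary

def test_search_returns_text():
    result = search_wikipedia("Mercedes Sosa discography")
    assert isinstance(result, str) and result  # helpers return strings, even on error

def test_page_and_summary_return_text():
    assert isinstance(get_wikipedia_page("Mercedes Sosa discography"), str)
    assert isinstance(wikipedia_summary("Mercedes Sosa discography"), str)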
tools/__pycache__/langchain_tools.cpython-313.pyc CHANGED
Binary files a/tools/__pycache__/langchain_tools.cpython-313.pyc and b/tools/__pycache__/langchain_tools.cpython-313.pyc differ

tools/__pycache__/wikipedia_tools.cpython-313.pyc ADDED
Binary file (6.02 kB)

tools/__pycache__/youtube_tools.cpython-313.pyc CHANGED
Binary files a/tools/__pycache__/youtube_tools.cpython-313.pyc and b/tools/__pycache__/youtube_tools.cpython-313.pyc differ
tools/langchain_tools.py CHANGED
@@ -16,6 +16,7 @@ from .multimodal_tools import MultimodalTools, analyze_transcript as _analyze_tr
 from .search_tools import SearchTools
 from .math_tools import MathTools
 from .youtube_tools import YouTubeTools
+from .wikipedia_tools import search_wikipedia, get_wikipedia_page, wikipedia_summary
 
 # Initialize tool instances (now env vars are available)
 multimodal_tools = MultimodalTools()
@@ -249,6 +250,46 @@ def analyze_python_tool(file_path: str, question: str = "What is the final outpu
 # =============================================================================
 # TOOL COLLECTIONS FOR EASY IMPORT
 # =============================================================================
+@tool
+def analyze_youtube_frames(url: str, question: str = "Describe what happens in this video") -> str:
+    """Extract frames from YouTube video and analyze visual content"""
+    youtube_tools = YouTubeTools()
+    return youtube_tools.analyze_video_content(url, question)
+
+@tool
+def extract_video_slides(url: str) -> str:
+    """Extract and analyze slides from educational YouTube videos"""
+    youtube_tools = YouTubeTools()
+    results = youtube_tools.analyze_video_slides(url)
+
+    if 'error' in results:
+        return results['error']
+
+    # Format slide analysis for LLM
+    slide_content = []
+    for frame in results.get('frames_analyzed', []):
+        if 'slide' in frame.get('analysis', '').lower():
+            slide_content.append(f"Slide {frame['frame_number']}: {frame['analysis']}")
+
+    if slide_content:
+        return "\n\n".join(slide_content)
+    else:
+        return results.get('analysis_summary', 'No slides detected in video')
+
+# Add Wikipedia tools
+@tool
+def search_wikipedia_tool(query: str, language: str = 'en') -> str:
+    """Search Wikipedia for information about a topic"""
+    return search_wikipedia(query, language)
+
+@tool
+def get_wikipedia_page_tool(page_title: str, language: str = 'en') -> str:
+    """Retrieve a specific Wikipedia page by title"""
+    return get_wikipedia_page(page_title, language)
+
+@tool
+def wikipedia_summary_tool(query: str, language: str = 'en') -> str:
+    """Get a concise Wikipedia summary about a topic"""
+    return wikipedia_summary(query, language)
 
 # Core tools (matching original template)
 CORE_TOOLS = [
@@ -266,8 +307,13 @@ CORE_TOOLS = [
 # Extended tools with new Excel functionality
 EXTENDED_TOOLS = CORE_TOOLS + [
     analyze_excel_tool,       # NEW: Excel/CSV analysis
+    analyze_youtube_frames,   # ✅ NEW: Frame extraction and analysis
+    extract_video_slides,     # ✅ NEW: Slide detection
     analyze_python_tool,
     search_news_tool,
+    search_wikipedia_tool,    # ✅ NEW: Wikipedia search
+    get_wikipedia_page_tool,  # ✅ NEW: Specific Wikipedia pages
+    wikipedia_summary_tool,   # ✅ NEW: Wikipedia summaries
     search_academic_tool,
     get_youtube_info,
     get_youtube_playlist_info,
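A quick way to exercise the new wrappers outside the agent — a sketch assuming the @tool decorator here is langchain_core.tools.tool, whose wrapped callables expose .invoke() and take a dict of their arguments (the URL is a placeholder):

# Hypothetical manual check of the new LangChain tool wrappers
from tools.langchain_tools import search_wikipedia_tool, analyze_youtube_frames

print(search_wikipedia_tool.invoke({"query": "Mercedes Sosa", "language": "en"}))
print(analyze_youtube_frames.invoke({"url": "https://www.youtube.com/watch?v=..."}))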
tools/wikipedia_tools.py ADDED
@@ -0,0 +1,139 @@
+# tools/wikipedia_tools.py
+"""
+Wikipedia search tools using LlamaIndex's ready-made Wikipedia integration
+Based on search results showing WikipediaToolSpec usage
+"""
+
+from typing import Optional, List
+import logging
+from .utils import logger
+
+try:
+    from llama_index.tools.wikipedia import WikipediaToolSpec
+    WIKIPEDIA_AVAILABLE = True
+except ImportError:
+    logger.warning("LlamaIndex Wikipedia tools not available. Install with: pip install llama-index-tools-wikipedia")
+    WIKIPEDIA_AVAILABLE = False
+
+class WikipediaTools:
+    """Wikipedia search and page retrieval tools using LlamaIndex"""
+
+    def __init__(self):
+        if not WIKIPEDIA_AVAILABLE:
+            raise ImportError("LlamaIndex Wikipedia tools not installed")
+
+        # Initialize the LlamaIndex Wikipedia tool spec
+        self.tool_spec = WikipediaToolSpec()
+        logger.info("Wikipedia tools initialized successfully")
+
+    def search_wikipedia(self, query: str, language: str = 'en') -> str:
+        """
+        Search Wikipedia for pages related to a query
+        Based on search results showing search_data method
+
+        Args:
+            query: Search term
+            language: Wikipedia language (default: 'en')
+
+        Returns:
+            Search results from Wikipedia
+        """
+        try:
+            logger.info(f"Searching Wikipedia for: {query}")
+
+            # Use LlamaIndex's search_data method (from search results)
+            results = self.tool_spec.search_data(query=query, lang=language)
+
+            if results:
+                logger.info(f"Found Wikipedia results for: {query}")
+                return results
+            else:
+                return f"No Wikipedia results found for: {query}"
+
+        except Exception as e:
+            error_msg = f"Error searching Wikipedia: {str(e)}"
+            logger.error(error_msg)
+            return error_msg
+
+    def get_wikipedia_page(self, page_title: str, language: str = 'en') -> str:
+        """
+        Retrieve a specific Wikipedia page
+        Based on search results showing load_data method
+
+        Args:
+            page_title: Title of the Wikipedia page
+            language: Wikipedia language (default: 'en')
+
+        Returns:
+            Content of the Wikipedia page
+        """
+        try:
+            logger.info(f"Retrieving Wikipedia page: {page_title}")
+
+            # Use LlamaIndex's load_data method (from search results)
+            content = self.tool_spec.load_data(page=page_title, lang=language)
+
+            if content:
+                logger.info(f"Successfully retrieved Wikipedia page: {page_title}")
+                return content
+            else:
+                return f"Wikipedia page not found: {page_title}"
+
+        except Exception as e:
+            error_msg = f"Error retrieving Wikipedia page '{page_title}': {str(e)}"
+            logger.error(error_msg)
+            return error_msg
+
+    def search_and_summarize(self, query: str, language: str = 'en') -> str:
+        """
+        Search Wikipedia and get a focused summary
+        Combines search and page retrieval for better results
+        """
+        try:
+            # First search for relevant pages
+            search_results = self.search_wikipedia(query, language)
+
+            if "No Wikipedia results found" in search_results:
+                return search_results
+
+            # Extract the first few sentences for a summary
+            # This gives us the most relevant information without overwhelming the LLM
+            lines = search_results.split('\n')
+            summary_lines = [line for line in lines[:10] if line.strip()]
+            summary = '\n'.join(summary_lines)
+
+            # Truncate if too long (to stay within token limits)
+            if len(summary) > 2000:
+                summary = summary[:2000] + "..."
+
+            return summary
+
+        except Exception as e:
+            error_msg = f"Error in Wikipedia search and summarize: {str(e)}"
+            logger.error(error_msg)
+            return error_msg
+
+# Convenience functions for direct use
+def search_wikipedia(query: str, language: str = 'en') -> str:
+    """Standalone function to search Wikipedia"""
+    if not WIKIPEDIA_AVAILABLE:
+        return "Wikipedia tools not available. Please install llama-index-tools-wikipedia"
+
+    tools = WikipediaTools()
+    return tools.search_wikipedia(query, language)
+
+def get_wikipedia_page(page_title: str, language: str = 'en') -> str:
+    """Standalone function to get a Wikipedia page"""
+    if not WIKIPEDIA_AVAILABLE:
+        return "Wikipedia tools not available. Please install llama-index-tools-wikipedia"
+
+    tools = WikipediaTools()
+    return tools.get_wikipedia_page(page_title, language)
+
+def wikipedia_summary(query: str, language: str = 'en') -> str:
+    """Standalone function to get a Wikipedia summary"""
+    if not WIKIPEDIA_AVAILABLE:
+        return "Wikipedia tools not available. Please install llama-index-tools-wikipedia"
+
+    tools = WikipediaTools()
+    return tools.search_and_summarize(query, language)
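For reference, the two WikipediaToolSpec methods this wrapper relies on can be called directly — a minimal sketch, assuming both return plain strings, as the wrapper code above expects:

# Direct use of the LlamaIndex tool spec wrapped by WikipediaTools
from llama_index.tools.wikipedia import WikipediaToolSpec

spec = WikipediaToolSpec()
print(spec.search_data(query="Mercedes Sosa discography", lang="en")[:200])  # backs search_wikipedia
print(spec.load_data(page="Mercedes Sosa discography", lang="en")[:200])     # backs get_wikipedia_page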
tools/youtube_tools.py CHANGED
@@ -12,6 +12,13 @@ import time
 import logging
 from .utils import logger, validate_file_exists
 
+import cv2
+import tempfile
+import os
+from typing import Optional, Dict, Any, List
+from PIL import Image
+import numpy as np
+
 class YouTubeTools:
     """YouTube tools with improved error handling and network resilience"""
 
@@ -285,6 +292,293 @@ class YouTubeTools:
         except Exception as e:
             logger.error(f"Failed to get qualities for {url}: {e}")
             return None
+    def extract_and_analyze_frames(self, url: str, num_frames: int = 5, analysis_question: str = "Describe what you see in this frame") -> Dict[str, Any]:
+        """
+        Extract key frames and analyze video content visually
+        Based on search results showing OpenCV and MoviePy approaches
+        """
+        logger.info(f"Starting frame extraction for {url} with {num_frames} frames")
+
+        results = {
+            'video_info': None,
+            'frames_analyzed': [],
+            'extraction_method': None,
+            'total_frames_extracted': 0,
+            'analysis_summary': None
+        }
+
+        try:
+            # Get video info first
+            video_info = self.get_video_info(url)
+            if not video_info:
+                return {'error': 'Could not retrieve video information'}
+
+            results['video_info'] = {
+                'title': video_info.get('title', 'Unknown'),
+                'duration': video_info.get('length', 0),
+                'author': video_info.get('author', 'Unknown')
+            }
+
+            # Strategy 1: Try full video download and OpenCV frame extraction (local environment)
+            frame_paths = self._strategy_1_opencv_extraction(url, num_frames)
+
+            if frame_paths:
+                results['extraction_method'] = 'OpenCV Video Download'
+                results['frames_analyzed'] = self._analyze_extracted_frames(frame_paths, analysis_question)
+                results['total_frames_extracted'] = len(frame_paths)
+
+                # Cleanup downloaded video and frames
+                self._cleanup_files(frame_paths)
+            else:
+                # Strategy 2: Thumbnail analysis fallback (HF Spaces compatible)
+                thumbnail_analysis = self._strategy_2_thumbnail_analysis(url, analysis_question)
+                results['extraction_method'] = 'Thumbnail Analysis (Fallback)'
+                results['frames_analyzed'] = [thumbnail_analysis]
+                results['total_frames_extracted'] = 1
+
+            # Generate overall summary
+            results['analysis_summary'] = self._generate_frame_analysis_summary(results)
+
+            return results
+
+        except Exception as e:
+            logger.error(f"Error in frame extraction: {e}")
+            return {'error': f'Frame extraction failed: {str(e)}'}
+
+    def _strategy_1_opencv_extraction(self, url: str, num_frames: int) -> List[str]:
+        """
+        Strategy 1: Download video and extract frames using OpenCV
+        Based on search result [2] OpenCV approach
+        """
+        try:
+            # Check if we're in a restricted environment (HF Spaces)
+            if os.getenv("SPACE_ID"):
+                logger.info("Restricted environment detected, skipping video download")
+                return []
+
+            # Download video to temporary location
+            temp_dir = tempfile.mkdtemp()
+            video_path = self.download_video(url, output_path=temp_dir, resolution='lowest')
+
+            if not video_path or not os.path.exists(video_path):
+                logger.warning("Video download failed")
+                return []
+
+            # Extract frames using OpenCV (based on search results)
+            frame_paths = self._extract_frames_opencv(video_path, num_frames)
+
+            # Cleanup video file (keep frame files for analysis)
+            if os.path.exists(video_path):
+                os.remove(video_path)
+
+            return frame_paths
+
+        except Exception as e:
+            logger.error(f"Strategy 1 failed: {e}")
+            return []
+
+    def _extract_frames_opencv(self, video_path: str, num_frames: int) -> List[str]:
+        """
+        Extract frames using OpenCV - implementation from search results
+        Based on search result [2] and [4] showing cv2.VideoCapture approach
+        """
+        frame_paths = []
+
+        try:
+            # Load video using OpenCV (from search results)
+            cap = cv2.VideoCapture(video_path)
+
+            if not cap.isOpened():
+                logger.error("Error: Could not open video with OpenCV")
+                return []
+
+            # Get total frames and calculate intervals (from search results)
+            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+            logger.info(f"Total frames in video: {total_frames}")
+
+            if total_frames == 0:
+                return []
+
+            # Calculate frame intervals to get evenly distributed frames
+            if num_frames >= total_frames:
+                frame_intervals = list(range(total_frames))
+            else:
+                frame_intervals = [int(total_frames * i / (num_frames - 1)) for i in range(num_frames)]
+                frame_intervals[-1] = total_frames - 1  # Ensure we get the last frame
+
+            # Extract frames at calculated intervals (based on search results pattern)
+            for i, frame_num in enumerate(frame_intervals):
+                cap.set(cv2.CAP_PROP_POS_FRAMES, frame_num)
+                ret, frame = cap.read()
+
+                if ret:
+                    # Save frame as temporary file (from search results)
+                    frame_filename = tempfile.mktemp(suffix=f'_frame_{i}.jpg')
+                    cv2.imwrite(frame_filename, frame)
+                    frame_paths.append(frame_filename)
+                    logger.debug(f"Extracted frame {i} at position {frame_num}")
+                else:
+                    logger.warning(f"Failed to read frame at position {frame_num}")
+
+            cap.release()
+            logger.info(f"Successfully extracted {len(frame_paths)} frames using OpenCV")
+            return frame_paths
+
+        except Exception as e:
+            logger.error(f"OpenCV frame extraction failed: {e}")
+            return []
+
+    def _strategy_2_thumbnail_analysis(self, url: str, analysis_question: str) -> Dict[str, Any]:
+        """
+        Strategy 2: Analyze thumbnail when video download isn't possible
+        Fallback for HF Spaces environment
+        """
+        try:
+            from .multimodal_tools import MultimodalTools
+            multimodal = MultimodalTools()
+
+            # Get video info for thumbnail
+            video_info = self.get_video_info(url)
+            if not video_info or not video_info.get('thumbnail_url'):
+                return {'error': 'No thumbnail available'}
+
+            # Download and analyze thumbnail
+            thumbnail_url = video_info['thumbnail_url']
+
+            # Download thumbnail to temporary file
+            import requests
+            response = requests.get(thumbnail_url, timeout=10)
+            response.raise_for_status()
+
+            with tempfile.NamedTemporaryFile(suffix='.jpg', delete=False) as tmp_file:
+                tmp_file.write(response.content)
+                thumbnail_path = tmp_file.name
+
+            # Analyze thumbnail
+            analysis = multimodal.analyze_image(
+                thumbnail_path,
+                f"This is a thumbnail from a YouTube video. {analysis_question}"
+            )
+
+            # Cleanup
+            os.unlink(thumbnail_path)
+
+            return {
+                'frame_number': 0,
+                'timestamp': 'thumbnail',
+                'analysis': analysis,
+                'extraction_method': 'thumbnail'
+            }
+
+        except Exception as e:
+            logger.error(f"Thumbnail analysis failed: {e}")
+            return {'error': f'Thumbnail analysis failed: {str(e)}'}
+
+    def _analyze_extracted_frames(self, frame_paths: List[str], analysis_question: str) -> List[Dict[str, Any]]:
+        """
+        Analyze extracted frames using multimodal AI
+        """
+        analyzed_frames = []
+
+        try:
+            from .multimodal_tools import MultimodalTools
+            multimodal = MultimodalTools()
+
+            for i, frame_path in enumerate(frame_paths):
+                try:
+                    analysis = multimodal.analyze_image(frame_path, analysis_question)
+
+                    analyzed_frames.append({
+                        'frame_number': i,
+                        'timestamp': f'frame_{i}',
+                        'analysis': analysis,
+                        'extraction_method': 'opencv'
+                    })
+
+                except Exception as e:
+                    logger.warning(f"Failed to analyze frame {i}: {e}")
+                    analyzed_frames.append({
+                        'frame_number': i,
+                        'timestamp': f'frame_{i}',
+                        'analysis': f'Analysis failed: {str(e)}',
+                        'extraction_method': 'opencv'
+                    })
+
+            return analyzed_frames
+
+        except Exception as e:
+            logger.error(f"Frame analysis failed: {e}")
+            return []
+
+    def _generate_frame_analysis_summary(self, results: Dict[str, Any]) -> str:
+        """Generate overall summary of frame analysis"""
+        try:
+            if not results.get('frames_analyzed'):
+                return "No frames were successfully analyzed"
+
+            # Combine all frame analyses
+            all_analyses = []
+            for frame in results['frames_analyzed']:
+                if isinstance(frame, dict) and 'analysis' in frame:
+                    all_analyses.append(frame['analysis'])
+
+            if not all_analyses:
+                return "No valid frame analyses found"
+
+            # Use multimodal AI to create summary
+            from .multimodal_tools import MultimodalTools
+            multimodal = MultimodalTools()
+
+            combined_text = "\n\n".join([f"Frame {i}: {analysis}" for i, analysis in enumerate(all_analyses)])
+
+            summary_prompt = f"""
+            Based on these frame analyses from a video titled "{results['video_info']['title']}",
+            create a comprehensive summary of the video's visual content:
+
+            {combined_text}
+
+            Provide a concise summary highlighting the main visual elements, actions, and themes.
+            """
+
+            summary = multimodal._make_openrouter_request({
+                "model": multimodal.text_model,
+                "messages": [{"role": "user", "content": summary_prompt}],
+                "temperature": 0,
+                "max_tokens": 512
+            })
+
+            return summary
+
+        except Exception as e:
+            logger.error(f"Summary generation failed: {e}")
+            return f"Summary generation failed: {str(e)}"
+
+    def _cleanup_files(self, file_paths: List[str]):
+        """Clean up temporary files"""
+        for file_path in file_paths:
+            try:
+                if os.path.exists(file_path):
+                    os.remove(file_path)
+            except Exception as e:
+                logger.warning(f"Could not remove {file_path}: {e}")
+
+    # Convenience method for specific use cases
+    def analyze_video_slides(self, url: str) -> Dict[str, Any]:
+        """Specialized method for analyzing educational videos with slides"""
+        return self.extract_and_analyze_frames(
+            url,
+            num_frames=8,
+            analysis_question="Is this a presentation slide? If yes, extract the main title and key points. If no, describe the visual content."
+        )
+
+    def analyze_video_content(self, url: str, question: str) -> str:
+        """Analyze video content and answer specific questions"""
+        frame_results = self.extract_and_analyze_frames(url, num_frames=5, analysis_question=question)
+
+        if 'error' in frame_results:
+            return frame_results['error']
+
+        return frame_results.get('analysis_summary', 'No analysis available')
 
 # Convenience functions (unchanged)
 def get_video_info(url: str) -> Optional[Dict[str, Any]]:
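The frame sampling in _extract_frames_opencv spreads num_frames positions evenly across the video and pins the last sample to the final frame. A worked example of that arithmetic (note this branch implicitly assumes num_frames >= 2, since num_frames == 1 would divide by zero):

# Worked example of the interval computation from _extract_frames_opencv
total_frames, num_frames = 1000, 5
frame_intervals = [int(total_frames * i / (num_frames - 1)) for i in range(num_frames)]
frame_intervals[-1] = total_frames - 1  # pin the last sample to the final frame
print(frame_intervals)  # [0, 250, 500, 750, 999]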