# iLearn/websearch_logic.py
import requests
from bs4 import BeautifulSoup, Comment
import logging
import re
from duckduckgo_search import DDGS
from googlesearch import search as google_search_lib
logger = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 10
MAX_CONTENT_LENGTH_PER_URL = 3000
MAX_TOTAL_SCRAPED_CONTENT = 9000
def scrape_url(url_to_scrape, query_filter=None):
    """Fetch a URL and extract its readable text.

    Returns a dict with "url" and "title", plus "content" on success or
    "error" on failure. `query_filter` is accepted for API compatibility but
    is currently unused.
    """
try:
logger.debug(f"SCRAPER_MODULE: Scraping URL: {url_to_scrape}")
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
'Accept-Language': 'en-US,en;q=0.9',
'Referer': 'https://www.google.com/'
}
response = requests.get(url_to_scrape, headers=headers, timeout=DEFAULT_TIMEOUT, allow_redirects=True)
response.raise_for_status()
content_type = response.headers.get('content-type', '').lower()
if 'html' not in content_type:
logger.info(f"SCRAPER_MODULE: Skipping non-HTML: {url_to_scrape} (type: {content_type})")
return {"url": url_to_scrape, "title": url_to_scrape, "error": f"Non-HTML: {content_type}"}
soup = BeautifulSoup(response.content, 'html.parser')
        removal_selectors = [
            "script", "style", "nav", "footer", "aside", "form", "iframe", "noscript",
            "header", "menu", "button", "figure", "figcaption", "link", "meta",
            ".sidebar", ".ad", ".advertisement", ".popup", ".modal", ".share",
            ".social", ".related-posts", ".comments-area", ".site-footer",
            ".site-header", ".widget", ".cookie-banner", ".gdpr", "dialog",
        ]
        # soup.select() accepts both bare tag names and CSS class selectors.
        for selector in removal_selectors:
            for element in soup.select(selector):
                element.decompose()
        # Strip HTML comments.
        for comment_node in soup.find_all(string=lambda text: isinstance(text, Comment)):
            comment_node.extract()
        main_content_selectors = [
            'main', 'article', '.main-content', '.entry-content', '.post-content',
            '.td-post-content', '.page-content', 'div[role="main"]',
            'div[class*="content"]', 'div[class*="article"]', 'div[class*="post"]',
            'div[id*="content"]', 'div[id*="main"]',
        ]
        # Prefer an obvious main-content container; fall back to <body> (or the whole document).
        content_area = next(
            (soup.select_one(s) for s in main_content_selectors if soup.select_one(s)),
            soup.body or soup,
        )
text_parts = []
if content_area:
            tags_to_check = ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'li', 'td', 'th', 'pre']
            for element in content_area.find_all(tags_to_check):
                text = element.get_text(separator=' ', strip=True)
                if text and len(text) > 10:
                    text_parts.append(text)
if not text_parts or len(" ".join(text_parts)) < 200:
all_text_from_area = content_area.get_text(separator='\n', strip=True)
all_text_from_area = re.sub(r'\n\s*\n+', '\n\n', all_text_from_area)
if all_text_from_area and len(all_text_from_area) > len(" ".join(text_parts)):
text_parts = [line for line in all_text_from_area.split('\n\n') if line.strip() and len(line.strip()) > 10]
full_text = "\n\n".join(list(dict.fromkeys(text_parts)))
if not full_text and hasattr(soup, 'body') and soup.body:
full_text = soup.body.get_text(separator='\n', strip=True)
elif not full_text: full_text = "Content could not be extracted."
full_text = re.sub(r'\s{3,}', ' ', full_text)
full_text = re.sub(r'(\n\s*){3,}', '\n\n', full_text)
title_tag = soup.find('title')
title = title_tag.string.strip() if title_tag and title_tag.string else url_to_scrape
        if len(full_text) > MAX_CONTENT_LENGTH_PER_URL:
            full_text = full_text[:MAX_CONTENT_LENGTH_PER_URL] + "..."
logger.debug(f"SCRAPER_MODULE: Scraped {len(full_text)} chars from {url_to_scrape}. Title: {title}")
return {"url": url_to_scrape, "title": title.strip(), "content": full_text.strip()}
    except requests.exceptions.HTTPError as e:
        logger.error(f"SCRAPER_MODULE: HTTP error {url_to_scrape}: {e.response.status_code}")
        return {"url": url_to_scrape, "title": url_to_scrape, "error": f"HTTP error: {e.response.status_code}"}
    except requests.exceptions.Timeout:
        logger.error(f"SCRAPER_MODULE: Timeout {url_to_scrape}")
        return {"url": url_to_scrape, "title": url_to_scrape, "error": "Timeout"}
    except requests.exceptions.RequestException as e:
        logger.error(f"SCRAPER_MODULE: Request failed {url_to_scrape}: {e}")
        return {"url": url_to_scrape, "title": url_to_scrape, "error": f"Request failed: {e}"}
    except Exception as e:
        logger.error(f"SCRAPER_MODULE: Scraping error {url_to_scrape}: {e}", exc_info=True)
        return {"url": url_to_scrape, "title": url_to_scrape, "error": f"Internal scraping error: {e}"}
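
# Illustrative usage sketch for scrape_url (not part of the module's API; the
# URL is a placeholder): callers should check for "error" before reading
# "content".
#
#     page = scrape_url("https://example.com/article")
#     if page.get("error"):
#         print("failed:", page["error"])
#     else:
#         print(page["title"], page["content"][:200])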
def search_and_scrape_duckduckgo(search_query, num_results=3):
    """Search DuckDuckGo for `search_query` and scrape the top `num_results` hits.

    Returns a list of per-URL dicts from scrape_url; when a page yields little
    or no content, the DDG result snippet is prepended as a fallback.
    """
scraped_data_all_urls = []
total_content_collected_length = 0
try:
logger.info(f"SCRAPER_MODULE (DDG): Searching for: '{search_query}' (max {num_results} results)")
search_results_urls_info = []
with DDGS() as ddgs:
            # Materialize the results: some versions of duckduckgo_search
            # return a generator, which would break the slicing below.
            ddg_search_results = list(ddgs.text(search_query, max_results=num_results + 2, region='wt-wt', safesearch='moderate'))
if ddg_search_results:
for result in ddg_search_results[:num_results]:
if result.get('href'):
search_results_urls_info.append({
"url": result['href'],
"title": result.get('title', 'N/A'),
"description": result.get('body', 'N/A')
})
logger.info(f"SCRAPER_MODULE (DDG): Found {len(search_results_urls_info)} URLs: {[r['url'] for r in search_results_urls_info]}")
if not search_results_urls_info:
return [{"query": search_query, "engine": "DuckDuckGo", "error": "No search results."}]
for res_info in search_results_urls_info:
url_to_scrape = res_info["url"]
            # Stop once the overall content budget is exhausted.
            if total_content_collected_length >= MAX_TOTAL_SCRAPED_CONTENT:
                break
scraped_info = scrape_url(url_to_scrape)
if scraped_info:
if not scraped_info.get("title") or scraped_info.get("title") == url_to_scrape:
scraped_info["title"] = res_info.get("title", url_to_scrape)
current_content = scraped_info.get("content", "")
ddg_desc = res_info.get("description")
if ddg_desc and (not current_content or len(current_content) < 150 or scraped_info.get("error")):
scraped_info["content"] = f"Search result snippet: {ddg_desc}\n\n(Content from page below {'or error encountered' if scraped_info.get('error') else ''}):\n{current_content if current_content else 'No content extracted.'}"
scraped_data_all_urls.append(scraped_info)
if scraped_info.get("content") and not scraped_info.get("error"):
total_content_collected_length += len(scraped_info["content"])
            else:
                scraped_data_all_urls.append({"url": url_to_scrape, "title": res_info.get("title", url_to_scrape), "error": "Scraping function returned no data."})
return scraped_data_all_urls
except Exception as e:
logger.error(f"SCRAPER_MODULE (DDG): Error for '{search_query}': {e}", exc_info=True)
return [{"query": search_query, "engine": "DuckDuckGo", "error": f"DDG search/scrape failed: {str(e)}"}]
def search_and_scrape_google(search_query, num_results=10):
    """Search Google via the googlesearch-python library and scrape the top results.

    Returns a list of per-URL dicts from scrape_url; when a page yields little
    or no content, Google's result description is used as a fallback.
    """
scraped_data_all_urls = []
total_content_collected_length = 0
try:
logger.info(f"SCRAPER_MODULE (Google): Searching for: '{search_query}' (num_results={num_results})")
        search_results_info = []
        try:
            # advanced=True makes the library yield SearchResult objects
            # (url, title, description) rather than bare URL strings;
            # sleep_interval adds a politeness delay between the HTTP requests
            # the library issues when it paginates through results.
            results_iterable = google_search_lib(
                query=search_query,
                num_results=num_results,
                lang='en',
                advanced=True,
                sleep_interval=1.0,
            )
# Convert SearchResult objects to our desired dict format
for res_obj in results_iterable:
if hasattr(res_obj, 'url'): # Check if it's a SearchResult object
search_results_info.append({
"url": res_obj.url,
"title": getattr(res_obj, 'title', 'N/A'),
"description": getattr(res_obj, 'description', 'N/A')
})
elif isinstance(res_obj, str): # Fallback if advanced=False or lib changes
search_results_info.append({"url": res_obj, "title": "N/A", "description": "N/A"})
logger.info(f"SCRAPER_MODULE (Google): Found {len(search_results_info)} result objects: {[r['url'] for r in search_results_info]}")
except Exception as e_search:
logger.error(f"SCRAPER_MODULE (Google): Error during google_search_lib call for '{search_query}': {e_search}", exc_info=True)
if "HTTP Error 429" in str(e_search):
return [{"query": search_query, "engine": "Google", "error": f"Google search blocked (HTTP 429). Try again later or use DuckDuckGo."}]
return [{"query": search_query, "engine": "Google", "error": f"Google search library failed: {str(e_search)}"}]
if not search_results_info:
return [{"query": search_query, "engine": "Google", "error": "No search results retrieved."}]
for res_info_item in search_results_info:
url_to_scrape = res_info_item["url"]
if total_content_collected_length >= MAX_TOTAL_SCRAPED_CONTENT:
logger.info(f"SCRAPER_MODULE (Google): Reached max total content length. Stopping further scraping.")
break
scraped_info = scrape_url(url_to_scrape)
if scraped_info:
# Use Google's title/description if scraping failed to get them or content is short
if not scraped_info.get("title") or scraped_info.get("title") == url_to_scrape:
scraped_info["title"] = res_info_item.get("title", url_to_scrape)
current_content = scraped_info.get("content", "")
google_desc = res_info_item.get("description")
                if google_desc and (not current_content or len(current_content) < 150 or scraped_info.get("error")):
                    scraped_info["content"] = f"Search result description: {google_desc}\n\n(Content from page below {'or error encountered' if scraped_info.get('error') else ''}):\n{current_content if current_content else 'No content extracted.'}"
scraped_data_all_urls.append(scraped_info)
if scraped_info.get("content") and not scraped_info.get("error"):
total_content_collected_length += len(scraped_info["content"])
else:
scraped_data_all_urls.append({"url": url_to_scrape, "title": res_info_item.get("title", "N/A"), "error": "Scraping function (scrape_url) returned no data."})
return scraped_data_all_urls
except Exception as e:
logger.error(f"SCRAPER_MODULE (Google): General error during search_and_scrape_google for '{search_query}': {e}", exc_info=True)
return [{"query": search_query, "engine": "Google", "error": f"Overall Google search or scraping process failed: {str(e)}"}]
def search_and_scrape(search_query, num_results=10):
    """Default search entry point; delegates to the Google backend."""
    return search_and_scrape_google(search_query, num_results=num_results)