import json
import logging
import os
import ssl
from typing import Optional

import aiohttp
from langchain_core.tools import tool
from tenacity import retry, stop_after_attempt, wait_exponential

logger = logging.getLogger(__name__)
@retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
async def fetch_hf_models(author: str) -> Optional[list]:
    """Fetch the author's most downloaded model from the Hugging Face Hub API."""
    url = f"https://huggingface.co/api/models?author={author}&sort=downloads&direction=-1&limit=1"
    ssl_context = ssl.create_default_context()
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url, ssl=ssl_context) as response:
                response.raise_for_status()
                # The models endpoint returns a JSON list of model records.
                return await response.json()
    except aiohttp.ClientError as e:
        logger.error(f"Failed to fetch models for {author}: {e}")
        raise
@tool
async def hub_stats_tool(author: str) -> str:
    """
    Fetch the most downloaded model from a specific author on Hugging Face Hub.

    Args:
        author (str): Hugging Face author username.

    Returns:
        str: Model information or error message.
    """
    try:
        # Serve from the local cache if a previous lookup was saved there.
        cache_file = f"temp/hf_cache_{author}.json"
        if os.path.exists(cache_file):
            with open(cache_file, "r") as f:
                models = json.load(f)
            logger.debug(f"Loaded cached models for {author}")
        else:
            models = await fetch_hf_models(author)
            os.makedirs("temp", exist_ok=True)
            with open(cache_file, "w") as f:
                json.dump(models, f)
        # The API returns a list; with limit=1 it holds at most one entry.
        if isinstance(models, list) and models:
            model = models[0]
            return f"The most downloaded model by {author} is {model['id']} with {model.get('downloads', 0):,} downloads."
        return f"No models found for author {author}."
    except Exception as e:
        logger.error(f"Error fetching models for {author}: {e}")
        return f"Error: {str(e)}"