import json
import mimetypes
import os
import re
import shutil
import threading
from typing import Optional
from loguru import logger
from datetime import datetime

import gradio as gr
from dotenv import load_dotenv
from huggingface_hub import login, HfApi
from smolagents import (
    CodeAgent,
    InferenceClientModel,
    Tool,
    DuckDuckGoSearchTool,
)
from smolagents.agent_types import (
    AgentAudio,
    AgentImage,
    AgentText,
    handle_agent_output_types,
)
from smolagents.gradio_ui import stream_to_gradio

from scripts.text_inspector_tool import TextInspectorTool
from scripts.text_web_browser import (
    ArchiveSearchTool,
    FinderTool,
    FindNextTool,
    PageDownTool,
    PageUpTool,
    SimpleTextBrowser,
    VisitTool,
)
from scripts.visual_qa import visualizer
from scripts.cvedb_tool import CVEDBTool
from scripts.report_generator import ReportGeneratorTool
from scripts.epss_tool import EpsTool
from scripts.nvd_tool import NvdTool
from scripts.kevin_tool import KevinTool

# web_search = GoogleSearchTool(provider="serper")
web_search = DuckDuckGoSearchTool()

AUTHORIZED_IMPORTS = [
    "requests",
    "zipfile",
    "pandas",
    "numpy",
    "sympy",
    "json",
    "bs4",
    "pubchempy",
    "xml",
    "yahoo_finance",
    "Bio",
    "sklearn",
    "scipy",
    "pydub",
    "PIL",
    "chess",
    "PyPDF2",
    "pptx",
    "torch",
    "datetime",
    "fractions",
    "csv",
    "plotly",
    "plotly.express",
    "plotly.graph_objects",
    "jinja2",
]

load_dotenv(override=True)

# Only login if HF_TOKEN is available and valid in environment
if os.getenv("HF_TOKEN"):
    try:
        login(os.getenv("HF_TOKEN"))
        logger.info("Successfully logged in with HF_TOKEN from environment")
    except Exception as e:
        logger.warning(f"Failed to login with HF_TOKEN from environment: {e}")
        logger.info("You can still use the application by providing a valid API key in the interface")

append_answer_lock = threading.Lock()

custom_role_conversions = {"tool-call": "assistant", "tool-response": "user"}

user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0"

BROWSER_CONFIG = {
    "viewport_size": 1024 * 5,
    "downloads_folder": "downloads_folder",
    "request_kwargs": {
        "headers": {"User-Agent": user_agent},
        "timeout": 300,
    },
    "serpapi_key": os.getenv("SERPAPI_API_KEY"),
}

os.makedirs(f"./{BROWSER_CONFIG['downloads_folder']}", exist_ok=True)

# Default Hugging Face model configuration
model_id = os.getenv("MODEL_ID", "Qwen/Qwen2.5-Coder-32B-Instruct")
logger.info(f"Default Hugging Face model: {model_id}")

# Define text_limit before using it
text_limit = 20000

# Default model (will be overridden by session-specific models)
# Note: This model may not work without a valid API key
default_model = None
ti_tool = None

# Only try to create default model if we have a valid token
if os.getenv("HF_TOKEN"):
    try:
        # Test if the token is valid
        api = HfApi(token=os.getenv("HF_TOKEN"))
        api.whoami()  # This will raise an exception if token is invalid

        # If we get here, token is valid
        default_model = InferenceClientModel(
            model_id,
            custom_role_conversions={
                "tool-call": "assistant",
                "tool-response": "user",
            },
            token=os.getenv("HF_TOKEN"),
        )
        ti_tool = TextInspectorTool(default_model, text_limit)
        logger.info("Default model created successfully with valid token")
    except Exception as e:
        logger.warning(f"Failed to create default model: {e}")
        default_model = None
        ti_tool = None
else:
    logger.info("No HF_TOKEN provided, default model will be created when user provides API key")

browser = SimpleTextBrowser(**BROWSER_CONFIG)
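# Illustrative .env for local runs (values are placeholders; HF_TOKEN enables the
# default model, while MODEL_ID and SERPAPI_API_KEY are optional overrides read above):
#
#   HF_TOKEN=hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#   MODEL_ID=Qwen/Qwen2.5-Coder-32B-Instruct
#   SERPAPI_API_KEY=your_serpapi_key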
# Tool configuration
cvedb_tool = CVEDBTool()
report_generator = ReportGeneratorTool()
epss_tool = EpsTool()
nvd_tool = NvdTool()
kevin_tool = KevinTool()

# Default tools (will be updated with session-specific models)
WEB_TOOLS = [
    web_search,  # duckduckgo
    VisitTool(browser),
    PageUpTool(browser),
    PageDownTool(browser),
    FinderTool(browser),
    FindNextTool(browser),
    ArchiveSearchTool(browser),
] + ([ti_tool] if ti_tool else []) + [
    cvedb_tool,  # CVEDB Tool
    # report_generator,  # Report generation tool - COMMENTED: Only works locally
    epss_tool,  # EPSS Tool
    nvd_tool,  # NVD Tool
    kevin_tool,  # KEVin Tool
]


def validate_hf_api_key(api_key: str) -> tuple[bool, str]:
    """Validate Hugging Face API key by making a test request."""
    if not api_key or not api_key.strip():
        return False, "❌ API key cannot be empty"

    api_key = api_key.strip()

    # Basic format validation
    if not api_key.startswith("hf_"):
        return False, "❌ Invalid API key format. Hugging Face API keys start with 'hf_'"

    try:
        # Test the API key by making a simple request
        api = HfApi(token=api_key)
        # Try to get user info to validate the token
        user_info = api.whoami()
        return True, f"✅ API key validated successfully! Welcome, {user_info.get('name', 'User')}!"
    except Exception as e:
        return False, f"❌ Invalid API key: {str(e)}"


def create_model_with_api_key(hf_token: str, model_id: str = None) -> InferenceClientModel:
    """Create a new InferenceClientModel instance with the provided HF_TOKEN."""
    if not model_id:
        model_id = os.getenv("MODEL_ID", "Qwen/Qwen2.5-Coder-32B-Instruct")

    logger.info(f"Creating model {model_id} with token: {hf_token[:10]}...")

    # First, try to login with the token to ensure it's properly set
    try:
        login(hf_token)
        logger.info("Successfully logged in with token")
    except Exception as e:
        logger.warning(f"Login failed: {e}")

    # Create the model with explicit token
    model = InferenceClientModel(
        model_id,
        custom_role_conversions={
            "tool-call": "assistant",
            "tool-response": "user",
        },
        token=hf_token,
    )

    # Verify the token is set correctly
    if hasattr(model, "token"):
        logger.info(f"Model token attribute: {model.token[:10] if model.token else 'None'}...")
    else:
        logger.warning("Model does not have token attribute")

    # Test the model with a simple request to verify token works
    try:
        logger.info("Testing model with simple request...")
        # This is a simple test to see if the model can be accessed
        test_response = model.generate("Hello", max_new_tokens=5)
        logger.info("Model test successful")
    except Exception as e:
        logger.error(f"Model test failed: {e}")
        # Don't raise the exception, just log it

    logger.info("Model created successfully with token")
    return model


def create_tools_with_model(model: InferenceClientModel) -> list:
    """Create tools list with the provided model."""
    return [
        web_search,  # duckduckgo
        VisitTool(browser),
        PageUpTool(browser),
        PageDownTool(browser),
        FinderTool(browser),
        FindNextTool(browser),
        ArchiveSearchTool(browser),
        TextInspectorTool(model, text_limit),
        cvedb_tool,  # CVEDB Tool
        # report_generator,  # Report generation tool - COMMENTED: Only works locally
        epss_tool,  # EPSS Tool
        nvd_tool,  # NVD Tool
        kevin_tool,  # KEVin Tool
    ]
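# Sketch of the per-session flow these helpers support (illustrative only;
# `user_token` stands in for a key supplied through the UI):
#
#   ok, status = validate_hf_api_key(user_token)
#   if ok:
#       model = create_model_with_api_key(user_token)   # session-specific model
#       tools = create_tools_with_model(model)          # tools bound to that model
#       agent = create_agent(user_token, max_steps=10)  # factory below bundles both steps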
# Agent creation in a factory function
def create_agent(hf_token: str = None, model_id: str = None, max_steps: int = 10):
    """Creates a fresh agent instance for each session"""
    if not hf_token:
        raise ValueError("A valid Hugging Face API key is required to create an agent.")

    logger.info(f"Creating agent with token: {hf_token[:10]}...")

    # Use session-specific model with HF_TOKEN
    model = create_model_with_api_key(hf_token, model_id)
    tools = create_tools_with_model(model)

    agent = CodeAgent(
        model=model,
        tools=[visualizer] + tools,
        max_steps=max_steps,
        verbosity_level=1,
        additional_authorized_imports=AUTHORIZED_IMPORTS,
        planning_interval=4,
    )
    logger.info("Agent created successfully")
    return agent


# Only create document_inspection_tool if default_model is available
if default_model:
    document_inspection_tool = TextInspectorTool(default_model, 20000)
else:
    document_inspection_tool = None


class GradioUI:
    """A one-line interface to launch your agent in Gradio"""

    def __init__(self, file_upload_folder: str | None = None):
        self.file_upload_folder = file_upload_folder
        if self.file_upload_folder is not None:
            if not os.path.exists(file_upload_folder):
                os.mkdir(file_upload_folder)

        # Create reports directory
        self.reports_folder = "reports"
        if not os.path.exists(self.reports_folder):
            os.mkdir(self.reports_folder)

    def save_report(self, html_content: str) -> str:
        """Saves the HTML report and returns the file path."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"vulnerability_report_{timestamp}.html"
        filepath = os.path.join(self.reports_folder, filename)

        with open(filepath, "w", encoding="utf-8") as f:
            f.write(html_content)

        return filepath

    def validate_api_key(self, api_key: str) -> tuple[str, str]:
        """Validate API key and return status message."""
        is_valid, message = validate_hf_api_key(api_key)
        if is_valid:
            return message, "success"
        else:
            return message, "error"

    def interact_with_agent(self, prompt, messages, session_state):
        # Get or create session-specific agent
        if "agent" not in session_state:
            # Check if we have a valid HF_TOKEN in session
            hf_token = session_state.get("hf_token")

            # If no token in session, try to get it from .env file
            if not hf_token:
                env_token = os.getenv("HF_TOKEN")
                if env_token:
                    hf_token = env_token
                    session_state["hf_token"] = env_token
                    session_state["max_steps"] = 10  # Default max_steps
                    logger.info(f"Using HF_TOKEN from .env file: {env_token[:10]}...")
                else:
                    logger.warning("No API key found in session state or .env file")
                    error_msg = "❌ No API key provided. Please enter your Hugging Face API key in the API Configuration section above or set HF_TOKEN in your .env file."
                    messages.append(gr.ChatMessage(role="assistant", content=error_msg))
                    yield messages
                    return

            logger.info(f"Agent not in session, checking for token: {hf_token[:10] if hf_token else 'None'}...")
            if hf_token:
                try:
                    max_steps = session_state.get("max_steps", 10)
                    session_state["agent"] = create_agent(hf_token, max_steps=max_steps)
                    logger.info("Agent created successfully in interact_with_agent")
                except Exception as e:
                    logger.error(f"Failed to create agent in interact_with_agent: {e}")
                    error_msg = f"❌ Failed to create agent with provided API key: {str(e)}"
                    messages.append(gr.ChatMessage(role="assistant", content=error_msg))
                    yield messages
                    return
        else:
            logger.info("Agent already exists in session")

        # Adding monitoring
        try:
            # log the existence of agent memory
            has_memory = hasattr(session_state["agent"], "memory")
            print(f"Agent has memory: {has_memory}")
            if has_memory:
                print(f"Memory type: {type(session_state['agent'].memory)}")

            # Prepare the system prompt
            system_prompt = f"""You are a Vulnerability Intelligence Analyst. Complete the user request in {session_state.get('max_steps', 10)} steps maximum.

AVAILABLE TOOLS: nvd_search, web_search, cvedb_search, kevin_search, epss_search

CRITICAL RULES:
1. VERSION ANALYSIS:
   - FIRST: Check current version via web search to understand the latest available version
   - PRIORITY: When user asks for a specific version, focus ONLY on vulnerabilities affecting that version and newer
   - EXCLUDE OLDER: Do NOT report vulnerabilities that only affect older versions
   - VERSION LOGIC:
     * "up to X" or "before X" = affects versions UP TO X, NOT newer versions
     * "X+" or "X and later" = affects X and newer versions
     * "X through Y" = affects versions X to Y inclusive
   - CORRECT LOGIC: If a CVE affects "up to v22.1" and the user asks about v24.0, then v24.0 is NOT vulnerable
   - CORRECT LOGIC: If a CVE affects "v25.0+" and the user asks about v24.0, then v24.0 is NOT vulnerable
   - CORRECT LOGIC: If a CVE affects "below v25.0" and the user asks about v24.0, then v24.0 is vulnerable
2. DATES: Use the current date for "today" or "current". Use: from datetime import datetime; today = datetime.now().strftime("%Y-%m-%d")
3. PRODUCT SEARCH: ALWAYS use ONLY the base product name, NEVER include versions when using vulnerability tools (nvd_search, cvedb_search, kevin_search, epss_search)
4. SOURCES: Always prioritize vendor/original sources for CVE, CWE, and reference links (official vendor websites, security advisories)
5. SIMPLICITY: Keep code simple and logical. Avoid unnecessary library imports. Use only basic Python functions when needed.
6. TOOL USAGE: Use ONLY the available tools. Do not complicate tool calls with unnecessary code.
7. STRING OPERATIONS: Use simple Python methods (in, find, startswith, endswith, etc.). NEVER use .contains() - it doesn't exist in Python. Avoid complex string parsing of tool results.
8. VULNERABILITY ANALYSIS: When analyzing tool results:
   - READ CAREFULLY: Pay attention to version ranges in vulnerability descriptions
   - "up to X" means versions UP TO X, NOT including newer versions
   - "below X" means versions BELOW X, NOT including X or newer
   - "X+" means X and newer versions
   - ONLY include vulnerabilities that actually affect the requested version
   - If unsure about version compatibility, exclude the vulnerability
   - DO NOT REASON: Don't create complex logic about version compatibility
   - DO NOT ASSUME: If a CVE affects "up to 22.1", it does NOT affect 24.0
   - SIMPLE RULE: Only include CVEs where the version range explicitly includes the requested version
9. REPORT GENERATION: Do NOT create complex functions or loops for the final answer. Use the information collected and format it directly following the REPORT FORMAT.

REPORT FORMAT:
# Vulnerability Report
### [Software and Version]
#### CVE-ID: [CVE-YYYY-NNNNN]
**NIST NVD Link:** https://nvd.nist.gov/vuln/detail/CVE-YYYY-NNNNN
- **Attack Type:** [Type]
- **Published:** [Date]
- **CVSS:** [Score]
- **EPSS:** [Score]
- **KEV:** [Yes/No]
- **Affected Versions:** [Specific range]
- **Description:** [Description]
- **CWE:** [CWE-XXX] - https://cwe.mitre.org/data/definitions/XXX.html
- **Recommendations:** [Remediation advice]
- **Sources:**
  - https://official-vendor-website.com/security-advisory
  - https://official-security-source.com
  - https://additional-reference-source.com

INSTRUCTIONS:
- Follow the exact format above
- Use official vendor sources when available
- Complete the task efficiently within the step limit

Now it is your turn, remember to keep code simple.
User Query: """

            # Combine system prompt with user message
            full_prompt = system_prompt + prompt

            messages.append(gr.ChatMessage(role="user", content=prompt))
            yield messages

            logger.info("Starting agent interaction...")
            for msg in stream_to_gradio(
                session_state["agent"], task=full_prompt, reset_agent_memory=False
            ):
                # If the message contains an HTML report, we save it and update the message
                if isinstance(msg.content, str) and msg.content.lstrip().startswith(("<!DOCTYPE html", "<html")):
                    report_path = self.save_report(msg.content)
                    msg.content = f"Report generated and saved at: {report_path}\n\nYou can open the file in your browser to view the complete report."
                messages.append(msg)
                yield messages

            yield messages
        except Exception as e:
            logger.error(f"Error in interaction: {str(e)}")
            print(f"Error in interaction: {str(e)}")
            error_msg = f"❌ Error during interaction: {str(e)}"
            messages.append(gr.ChatMessage(role="assistant", content=error_msg))
            yield messages

    def setup_api_key(self, api_key: str, max_steps: int, session_state) -> str:
        """Setup API key for the session."""
        # Check if API key is provided from interface
        if api_key and api_key.strip():
            # Use the API key from interface
            token_to_use = api_key.strip()
            source = "interface"
        else:
            # Try to use token from .env file
            env_token = os.getenv("HF_TOKEN")
            if env_token:
                token_to_use = env_token
                source = ".env file"
            else:
                return "❌ No API key provided. Please enter your Hugging Face API key or set HF_TOKEN in your .env file."

        # Validate the token
        is_valid, message = validate_hf_api_key(token_to_use)
        if is_valid:
            # Store HF_TOKEN in session state
            session_state["hf_token"] = token_to_use
            session_state["max_steps"] = max_steps
            logger.info(f"API key stored in session from {source}: {token_to_use[:10]}...")
            logger.info(f"Max steps set to: {max_steps}")

            # Also set the environment variable for smolagents
            os.environ["HF_TOKEN"] = token_to_use
            logger.info("HF_TOKEN environment variable set")

            # Create new agent with the HF_TOKEN and max_steps
            try:
                session_state["agent"] = create_agent(token_to_use, max_steps=max_steps)
                logger.info("Agent created successfully in setup_api_key")
                return f"✅ API key from {source} validated and agent created successfully! {message.split('!')[1] if '!' in message else ''}"
            except Exception as e:
                logger.error(f"Failed to create agent in setup_api_key: {e}")
                return f"❌ Failed to create agent with API key from {source}: {str(e)}"
        else:
            logger.warning(f"Invalid API key from {source}: {token_to_use[:10] if token_to_use else 'None'}...")
            return f"❌ Invalid API key from {source}: {message}"

    def upload_file(
        self,
        file,
        file_uploads_log,
        allowed_file_types=[
            "application/pdf",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            "text/plain",
        ],
    ):
        """
        Handle file uploads; default allowed types are .pdf, .docx, and .txt
        """
        if file is None:
            return gr.Textbox("No file uploaded", visible=True), file_uploads_log

        try:
            mime_type, _ = mimetypes.guess_type(file.name)
        except Exception as e:
            return gr.Textbox(f"Error: {e}", visible=True), file_uploads_log

        if mime_type not in allowed_file_types:
            return gr.Textbox("File type disallowed", visible=True), file_uploads_log

        # Sanitize file name
        original_name = os.path.basename(file.name)
        sanitized_name = re.sub(
            r"[^\w\-.]", "_", original_name
        )  # Replace any non-alphanumeric, non-dash, or non-dot characters with underscores

        type_to_ext = {}
        for ext, t in mimetypes.types_map.items():
            if t not in type_to_ext:
                type_to_ext[t] = ext

        # Ensure the extension correlates to the mime type
        sanitized_name = sanitized_name.split(".")[:-1]
        sanitized_name.append("" + type_to_ext[mime_type])
        sanitized_name = "".join(sanitized_name)

        # Save the uploaded file to the specified folder
        file_path = os.path.join(
            self.file_upload_folder, os.path.basename(sanitized_name)
        )
        shutil.copy(file.name, file_path)

        return gr.Textbox(
            f"File uploaded: {file_path}", visible=True
        ), file_uploads_log + [file_path]

    def log_user_message(self, text_input, file_uploads_log):
        return (
            text_input
            + (
                f"\nYou have been provided with these files, which might be helpful or not: {file_uploads_log}"
                if len(file_uploads_log) > 0
                else ""
            ),
            gr.Textbox(
                value="",
                interactive=False,
                placeholder="Please wait while Steps are getting populated",
            ),
            gr.Button(interactive=False),
        )

    def detect_device(self, request: gr.Request):
        # Check whether the user device is a mobile or a computer
        if not request:
            return "Unknown device"

        # Method 1: Check sec-ch-ua-mobile header
        is_mobile_header = request.headers.get("sec-ch-ua-mobile")
        if is_mobile_header:
            return "Mobile" if "?1" in is_mobile_header else "Desktop"

        # Method 2: Check user-agent string
        user_agent = request.headers.get("user-agent", "").lower()
        mobile_keywords = ["android", "iphone", "ipad", "mobile", "phone"]
        if any(keyword in user_agent for keyword in mobile_keywords):
            return "Mobile"

        # Method 3: Check platform
        platform = request.headers.get("sec-ch-ua-platform", "").lower()
        if platform:
            if platform in ['"android"', '"ios"']:
                return "Mobile"
            elif platform in ['"windows"', '"macos"', '"linux"']:
                return "Desktop"

        # Default case if no clear indicators
        return "Desktop"

    def launch(self, **kwargs):
        with gr.Blocks(theme="ocean", fill_height=True) as demo:
            # Different layouts for mobile and computer devices
            @gr.render()
            def layout(request: gr.Request):
                device = self.detect_device(request)
                print(f"device - {device}")

                # Render layout with sidebar
                if device == "Desktop":
                    with gr.Blocks(
                        fill_height=True,
                    ):
                        file_uploads_log = gr.State([])

                        with gr.Sidebar():
                            # Project title and repository link at the top
                            gr.Markdown("""# Open Deep Research Vulnerability Intelligence""")
                            gr.Markdown("""GitHub Repository""")

                            # About section
                            with gr.Accordion("ℹ️ About", open=False):
                                gr.Markdown("""**What it does:**
This AI agent specializes in automated vulnerability research and analysis, built on Hugging Face's Open Deep Research architecture. It can search across multiple security databases to provide comprehensive vulnerability intelligence reports.

**Available Tools & APIs:**
- 🛡️ NIST NVD - National Vulnerability Database (free API)
- 📊 Shodan CVEDB - Comprehensive vulnerability database (free API)
- ⚠️ KEVin - Known Exploited Vulnerabilities database (free API)
- 📈 EPSS - Exploit Prediction Scoring System (free API)
- 🌐 **Web Browser** - Navigate and extract information from web pages

**Model Configuration:**
- **Default Model**: Qwen/Qwen2.5-Coder-32B-Instruct (recommended)
- **Alternative**: You can also use Ollama with local models for privacy

**How to use:**
1. Enter your Hugging Face API key below
2. Ask about specific software versions, CVEs, or security vulnerabilities
3. The agent will automatically search all available databases
4. Receive comprehensive vulnerability reports with CVSS scores, EPSS predictions, and remediation advice""")

                            with gr.Group():
                                gr.Markdown("**Your request**", container=True)
                                text_input = gr.Textbox(
                                    lines=3,
                                    label="Your request",
                                    container=False,
                                    placeholder="Enter your prompt here and press Shift+Enter or press the button",
                                )
                                launch_research_btn = gr.Button(
                                    "Run", variant="primary"
                                )

                            # Examples Section
                            with gr.Accordion("💡 Example Prompts", open=False):
                                gr.Markdown("**Click any example below to populate your request field:**")

                                example_btn_1 = gr.Button("🔍 MobaXterm 24.0 vulnerabilities", size="sm", variant="secondary")
                                example_btn_2 = gr.Button("🔍 Chrome 120.0.6099.109 security issues", size="sm", variant="secondary")
                                example_btn_3 = gr.Button("🔍 Apache Tomcat 9.0.65 KEV check", size="sm", variant="secondary")
                                example_btn_4 = gr.Button("🔍 Windows 11 recent vulnerabilities", size="sm", variant="secondary")
                                example_btn_5 = gr.Button("🔍 CVE-2024-0001 analysis", size="sm", variant="secondary")
                                example_btn_6 = gr.Button("🔍 Nginx 1.24.0 security status", size="sm", variant="secondary")

                                # Example button events
                                example_btn_1.click(
                                    lambda: "Analyze MobaXterm 24.0 for vulnerabilities as of today",
                                    None,
                                    [text_input],
                                )
                                example_btn_2.click(
                                    lambda: "Check Chrome 120.0.6099.109 for security vulnerabilities",
                                    None,
                                    [text_input],
                                )
                                example_btn_3.click(
                                    lambda: "Is Apache Tomcat 9.0.65 in KEV database as of today?",
                                    None,
                                    [text_input],
                                )
                                example_btn_4.click(
                                    lambda: "Check Windows 11 for recent vulnerabilities as of today",
                                    None,
                                    [text_input],
                                )
                                example_btn_5.click(
                                    lambda: "Analyze CVE-2024-0001 in detail",
                                    None,
                                    [text_input],
                                )
                                example_btn_6.click(
                                    lambda: "Check Nginx 1.24.0 for security vulnerabilities",
                                    None,
                                    [text_input],
                                )

                            # API Key Configuration Section
                            with gr.Accordion("🔑 API Configuration", open=False):
                                gr.Markdown("**Configure your Hugging Face API Key**")
                                gr.Markdown("All API keys are stored only in session memory, not persisted.")
                                gr.Markdown("Get your API key from: https://huggingface.co/settings/tokens")

                                api_key_input = gr.Textbox(
                                    label="Hugging Face API Key",
                                    placeholder="hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
                                    type="password",
                                    lines=1,
                                )
                                api_key_status = gr.Textbox(
                                    label="Status",
                                    value="✅ HF_TOKEN found in .env file. To use a different key, enter it above and click 'Setup API Key'."
if os.getenv("HF_TOKEN") else "⚠️ Please enter your Hugging Face API key or set HF_TOKEN in your .env file.", interactive=False ) # Agent configuration gr.Markdown("**Agent Configuration**") max_steps_slider = gr.Slider( minimum=5, maximum=30, value=10, step=1, label="Maximum Steps", info="Number of steps the agent can take per session (higher = more detailed but slower)" ) setup_api_btn = gr.Button("Setup API Key", variant="secondary") # If an upload folder is provided, enable the upload feature if self.file_upload_folder is not None: upload_file = gr.File(label="Upload a file") upload_status = gr.Textbox( label="Upload Status", interactive=False, visible=False, ) upload_file.change( self.upload_file, [upload_file, file_uploads_log], [upload_status, file_uploads_log], ) # Powered by smolagents with gr.Row(): gr.HTML("""
                                    <div style="display: flex; align-items: center; gap: 8px;">
                                        <span>Powered by</span>
                                        <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png" alt="logo" style="height: 32px;">
                                        <a href="https://github.com/huggingface/smolagents" target="_blank">hf/smolagents</a>
                                    </div>
""") # Add session state to store session-specific data session_state = gr.State( {} ) # Initialize empty state for each session stored_messages = gr.State([]) chatbot = gr.Chatbot( label="open-Deep-Research", type="messages", avatar_images=( None, "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png", ), resizeable=False, scale=1, elem_id="my-chatbot", ) # Add component to display reports report_viewer = gr.HTML(label="Vulnerability Report", visible=False) # API Key setup event setup_api_btn.click( self.setup_api_key, [api_key_input, max_steps_slider, session_state], [api_key_status] ) text_input.submit( self.log_user_message, [text_input, file_uploads_log], [stored_messages, text_input, launch_research_btn], ).then( self.interact_with_agent, # Include session_state in function calls [stored_messages, chatbot, session_state], [chatbot], ).then( lambda: ( gr.Textbox( interactive=True, placeholder="Enter your prompt here and press the button", ), gr.Button(interactive=True), ), None, [text_input, launch_research_btn], ) launch_research_btn.click( self.log_user_message, [text_input, file_uploads_log], [stored_messages, text_input, launch_research_btn], ).then( self.interact_with_agent, # Include session_state in function calls [stored_messages, chatbot, session_state], [chatbot], ).then( lambda: ( gr.Textbox( interactive=True, placeholder="Enter your prompt here and press the button", ), gr.Button(interactive=True), ), None, [text_input, launch_research_btn], ) # Render simple layout else: with gr.Blocks( fill_height=True, ): # Project title and repository link at the top gr.Markdown("""# Open Deep Research Vulnerability Intelligence""") gr.Markdown(""" Github Repository""") # About section for mobile with gr.Accordion("ℹ️ About", open=False): gr.Markdown("""**What it does:** This AI agent specializes in automated vulnerability research and analysis, built on Hugging Face's Open Deep Research architecture. It can search across multiple security databases to provide comprehensive vulnerability intelligence reports. **Available Tools & APIs:** - 🛡️ NIST NVD - National Vulnerability Database (free API) - 📊 Shodan CVEDB - Comprehensive vulnerability database (free API) - ⚠️ KEVin - Known Exploited Vulnerabilities database (free API) - 📈 EPSS - Exploit Prediction Scoring System (free API) - 🌐 **Web Browser** - Navigate and extract information from web pages **Model Configuration:** - **Default Model**: Qwen/Qwen2.5-Coder-32B-Instruct (recommended) - **Alternative**: You can also use Ollama with local models for privacy **How to use:** 1. Enter your Hugging Face API key below 2. Ask about specific software versions, CVEs, or security vulnerabilities 3. The agent will automatically search all available databases 4. Receive comprehensive vulnerability reports with CVSS scores, EPSS predictions, and remediation advice""") # API Key Configuration Section for Mobile with gr.Accordion("🔑 API Configuration", open=False): gr.Markdown("**Configure your Hugging Face API Key**") gr.Markdown("Due to recent API changes, you need to provide your own Hugging Face API key to use this application.") gr.Markdown("Get your API key from: https://huggingface.co/settings/tokens") mobile_api_key_input = gr.Textbox( label="Hugging Face API Key", placeholder="hf_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", type="password", lines=1 ) mobile_api_key_status = gr.Textbox( label="Status", value="✅ HF_TOKEN found in .env file. 
                            # Agent configuration for mobile
                            gr.Markdown("**Agent Configuration**")
                            mobile_max_steps_slider = gr.Slider(
                                minimum=5,
                                maximum=30,
                                value=10,
                                step=1,
                                label="Maximum Steps",
                                info="Number of steps the agent can take per session (higher = more detailed but slower)",
                            )

                            mobile_setup_api_btn = gr.Button("Setup API Key", variant="secondary")

                        # Add session state to store session-specific data
                        session_state = gr.State({})  # Initialize empty state for each session
                        stored_messages = gr.State([])
                        file_uploads_log = gr.State([])

                        chatbot = gr.Chatbot(
                            label="open-Deep-Research",
                            type="messages",
                            avatar_images=(
                                None,
                                "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png",
                            ),
                            resizeable=True,
                            scale=1,
                        )

                        # Mobile API Key setup event
                        mobile_setup_api_btn.click(
                            self.setup_api_key,
                            [mobile_api_key_input, mobile_max_steps_slider, session_state],
                            [mobile_api_key_status],
                        )

                        # Mobile example buttons are wired up in the "Example Prompts"
                        # accordion below, after text_input exists.
""") text_input = gr.Textbox( lines=1, label="Your request", placeholder="Enter your prompt here and press the button", ) launch_research_btn = gr.Button( "Run", variant="primary", ) # Examples Section for Mobile with gr.Accordion("💡 Example Prompts", open=False): gr.Markdown("**Click any example below to populate your request field:**") mobile_example_btn_1 = gr.Button("🔍 MobaXterm 24.0 vulnerabilities", size="sm", variant="secondary") mobile_example_btn_2 = gr.Button("🔍 Chrome 120.0.6099.109 security analysis", size="sm", variant="secondary") mobile_example_btn_3 = gr.Button("🔍 Apache Tomcat 9.0.65 KEV check", size="sm", variant="secondary") # Mobile Example button events mobile_example_btn_1.click( lambda: "Analyze MobaXterm 24.0 for vulnerabilities as of today", None, [text_input] ) mobile_example_btn_2.click( lambda: "Check Chrome 120.0.6099.109 for current security issues", None, [text_input] ) mobile_example_btn_3.click( lambda: "Is Apache Tomcat 9.0.65 in KEV database as of today?", None, [text_input] ) # Powered by smolagents for mobile with gr.Row(): gr.HTML("""
                                <div style="display: flex; align-items: center; gap: 8px;">
                                    <span>Powered by</span>
                                    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/mascot_smol.png" alt="logo" style="height: 32px;">
                                    <a href="https://github.com/huggingface/smolagents" target="_blank">hf/smolagents</a>
                                </div>
""") text_input.submit( self.log_user_message, [text_input, file_uploads_log], [stored_messages, text_input, launch_research_btn], ).then( self.interact_with_agent, # Include session_state in function calls [stored_messages, chatbot, session_state], [chatbot], ).then( lambda: ( gr.Textbox( interactive=True, placeholder="Enter your prompt here and press the button", ), gr.Button(interactive=True), ), None, [text_input, launch_research_btn], ) launch_research_btn.click( self.log_user_message, [text_input, file_uploads_log], [stored_messages, text_input, launch_research_btn], ).then( self.interact_with_agent, # Include session_state in function calls [stored_messages, chatbot, session_state], [chatbot], ).then( lambda: ( gr.Textbox( interactive=True, placeholder="Enter your prompt here and press the button", ), gr.Button(interactive=True), ), None, [text_input, launch_research_btn], ) demo.launch(debug=True, **kwargs) # can this fix ctrl-c no response? no try: GradioUI().launch(mcp_server=True) except KeyboardInterrupt: ...