| instruction | code | response | file |
|---|---|---|---|
Create Google-style docstrings for my code |
from __future__ import annotations
import os
from dataclasses import dataclass, field
from typing import Optional, List
from pathlib import Path
def _default_workspace():
from common.utils import expand_path
return expand_path("~/cow")
@dataclass
class MemoryConfig:
# Storage paths (default: ~/cow)
workspace_root: str = field(default_factory=_default_workspace)
# Embedding config
embedding_provider: str = "openai" # "openai" | "local"
embedding_model: str = "text-embedding-3-small"
embedding_dim: int = 1536
# Chunking config
chunk_max_tokens: int = 500
chunk_overlap_tokens: int = 50
# Search config
max_results: int = 10
min_score: float = 0.1
# Hybrid search weights
vector_weight: float = 0.7
keyword_weight: float = 0.3
# Memory sources
sources: List[str] = field(default_factory=lambda: ["memory", "session"])
# Sync config
enable_auto_sync: bool = True
sync_on_search: bool = True
def get_workspace(self) -> Path:
return Path(self.workspace_root)
def get_memory_dir(self) -> Path:
return self.get_workspace() / "memory"
def get_db_path(self) -> Path:
index_dir = self.get_memory_dir() / "long-term"
index_dir.mkdir(parents=True, exist_ok=True)
return index_dir / "index.db"
def get_skills_dir(self) -> Path:
return self.get_workspace() / "skills"
def get_agent_workspace(self, agent_name: Optional[str] = None) -> Path:
workspace = self.get_workspace()
# Ensure workspace directory exists
workspace.mkdir(parents=True, exist_ok=True)
return workspace
# Global memory configuration
_global_memory_config: Optional[MemoryConfig] = None
def get_default_memory_config() -> MemoryConfig:
global _global_memory_config
if _global_memory_config is None:
_global_memory_config = MemoryConfig()
return _global_memory_config
def set_global_memory_config(config: MemoryConfig):
global _global_memory_config
_global_memory_config = config |
---
+++
@@ -1,3 +1,8 @@
+"""
+Memory configuration module
+
+Provides global memory configuration with simplified workspace structure
+"""
from __future__ import annotations
import os
@@ -7,12 +12,14 @@
def _default_workspace():
+ """Get default workspace path with proper Windows support"""
from common.utils import expand_path
return expand_path("~/cow")
@dataclass
class MemoryConfig:
+ """Configuration for memory storage and search"""
# Storage paths (default: ~/cow)
workspace_root: str = field(default_factory=_default_workspace)
@@ -43,20 +50,33 @@
def get_workspace(self) -> Path:
+ """Get workspace root directory"""
return Path(self.workspace_root)
def get_memory_dir(self) -> Path:
+ """Get memory files directory"""
return self.get_workspace() / "memory"
def get_db_path(self) -> Path:
+ """Get SQLite database path for long-term memory index"""
index_dir = self.get_memory_dir() / "long-term"
index_dir.mkdir(parents=True, exist_ok=True)
return index_dir / "index.db"
def get_skills_dir(self) -> Path:
+ """Get skills directory"""
return self.get_workspace() / "skills"
def get_agent_workspace(self, agent_name: Optional[str] = None) -> Path:
+ """
+ Get workspace directory for an agent
+
+ Args:
+ agent_name: Optional agent name (not used in current implementation)
+
+ Returns:
+ Path to workspace directory
+ """
workspace = self.get_workspace()
# Ensure workspace directory exists
workspace.mkdir(parents=True, exist_ok=True)
@@ -68,6 +88,13 @@
def get_default_memory_config() -> MemoryConfig:
+ """
+ Get the global memory configuration.
+ If not set, returns a default configuration.
+
+ Returns:
+ MemoryConfig instance
+ """
global _global_memory_config
if _global_memory_config is None:
_global_memory_config = MemoryConfig()
@@ -75,5 +102,21 @@
def set_global_memory_config(config: MemoryConfig):
+ """
+ Set the global memory configuration.
+ This should be called before creating any MemoryManager instances.
+
+ Args:
+ config: MemoryConfig instance to use globally
+
+ Example:
+ >>> from agent.memory import MemoryConfig, set_global_memory_config
+ >>> config = MemoryConfig(
+ ... workspace_root="~/my_agents",
+ ... embedding_provider="openai",
+ ... vector_weight=0.8
+ ... )
+ >>> set_global_memory_config(config)
+ """
global _global_memory_config
- _global_memory_config = config
+ _global_memory_config = config
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/memory/config.py |
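A minimal usage sketch of the global-config pattern in this row; the import path follows the file URL above, and the workspace path and weight are illustrative values, not part of the dataset:

```python
# Hedged example: not part of the dataset row, just a sketch of how the API above composes.
from agent.memory.config import MemoryConfig, set_global_memory_config, get_default_memory_config

config = MemoryConfig(workspace_root="/tmp/agent_ws", vector_weight=0.8)  # illustrative values
set_global_memory_config(config)  # must run before anything reads the global config
db_path = get_default_memory_config().get_db_path()
# -> /tmp/agent_ws/memory/long-term/index.db (parent directories are created on first access)
```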
Expand my code with proper documentation strings | import importlib
import importlib.util
from pathlib import Path
from typing import Dict, Any, Type
from agent.tools.base_tool import BaseTool
from common.log import logger
from config import conf
class ToolManager:
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = super(ToolManager, cls).__new__(cls)
cls._instance.tool_classes = {} # Store tool classes instead of instances
cls._instance._initialized = False
return cls._instance
def __init__(self):
# Initialize only once
if not hasattr(self, 'tool_classes'):
self.tool_classes = {} # Dictionary to store tool classes
def load_tools(self, tools_dir: str = "", config_dict=None):
if tools_dir:
self._load_tools_from_directory(tools_dir)
self._configure_tools_from_config()
else:
self._load_tools_from_init()
self._configure_tools_from_config(config_dict)
def _load_tools_from_init(self) -> bool:
try:
# Try to import the tools package
tools_package = importlib.import_module("agent.tools")
# Check if __all__ is defined
if hasattr(tools_package, "__all__"):
tool_classes = tools_package.__all__
# Import each tool class directly from the tools package
for class_name in tool_classes:
try:
# Skip base classes
if class_name in ["BaseTool", "ToolManager"]:
continue
# Get the class directly from the tools package
if hasattr(tools_package, class_name):
cls = getattr(tools_package, class_name)
if (
isinstance(cls, type)
and issubclass(cls, BaseTool)
and cls != BaseTool
):
try:
# Skip memory tools (they need special initialization with memory_manager)
if class_name in ["MemorySearchTool", "MemoryGetTool"]:
logger.debug(f"Skipped tool {class_name} (requires memory_manager)")
continue
# Create a temporary instance to get the name
temp_instance = cls()
tool_name = temp_instance.name
# Store the class, not the instance
self.tool_classes[tool_name] = cls
logger.debug(f"Loaded tool: {tool_name} from class {class_name}")
except ImportError as e:
# Handle missing dependencies with helpful messages
error_msg = str(e)
if "browser-use" in error_msg or "browser_use" in error_msg:
logger.warning(
f"[ToolManager] Browser tool not loaded - missing dependencies.\n"
f" To enable browser tool, run:\n"
f" pip install browser-use markdownify playwright\n"
f" playwright install chromium"
)
elif "markdownify" in error_msg:
logger.warning(
f"[ToolManager] {cls.__name__} not loaded - missing markdownify.\n"
f" Install with: pip install markdownify"
)
else:
logger.warning(f"[ToolManager] {cls.__name__} not loaded due to missing dependency: {error_msg}")
except Exception as e:
logger.error(f"Error initializing tool class {cls.__name__}: {e}")
except Exception as e:
logger.error(f"Error importing class {class_name}: {e}")
return len(self.tool_classes) > 0
return False
except ImportError:
logger.warning("Could not import agent.tools package")
return False
except Exception as e:
logger.error(f"Error loading tools from __init__.__all__: {e}")
return False
def _load_tools_from_directory(self, tools_dir: str):
tools_path = Path(tools_dir)
# Traverse all .py files
for py_file in tools_path.rglob("*.py"):
# Skip initialization files and base tool files
if py_file.name in ["__init__.py", "base_tool.py", "tool_manager.py"]:
continue
# Get module name
module_name = py_file.stem
try:
# Load module directly from file
spec = importlib.util.spec_from_file_location(module_name, py_file)
if spec and spec.loader:
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# Find tool classes in the module
for attr_name in dir(module):
cls = getattr(module, attr_name)
if (
isinstance(cls, type)
and issubclass(cls, BaseTool)
and cls != BaseTool
):
try:
# Skip memory tools (they need special initialization with memory_manager)
if attr_name in ["MemorySearchTool", "MemoryGetTool"]:
logger.debug(f"Skipped tool {attr_name} (requires memory_manager)")
continue
# Create a temporary instance to get the name
temp_instance = cls()
tool_name = temp_instance.name
# Store the class, not the instance
self.tool_classes[tool_name] = cls
except ImportError as e:
# Handle missing dependencies with helpful messages
error_msg = str(e)
if "browser-use" in error_msg or "browser_use" in error_msg:
logger.warning(
f"[ToolManager] Browser tool not loaded - missing dependencies.\n"
f" To enable browser tool, run:\n"
f" pip install browser-use markdownify playwright\n"
f" playwright install chromium"
)
elif "markdownify" in error_msg:
logger.warning(
f"[ToolManager] {cls.__name__} not loaded - missing markdownify.\n"
f" Install with: pip install markdownify"
)
else:
logger.warning(f"[ToolManager] {cls.__name__} not loaded due to missing dependency: {error_msg}")
except Exception as e:
logger.error(f"Error initializing tool class {cls.__name__}: {e}")
except Exception as e:
print(f"Error importing module {py_file}: {e}")
def _configure_tools_from_config(self, config_dict=None):
try:
# Get tools configuration
tools_config = config_dict or conf().get("tools", {})
# Record tools that are configured but not loaded
missing_tools = []
# Store configurations for later use when instantiating
self.tool_configs = tools_config
# Check which configured tools are missing
for tool_name in tools_config:
if tool_name not in self.tool_classes:
missing_tools.append(tool_name)
# If there are missing tools, record warnings
if missing_tools:
for tool_name in missing_tools:
if tool_name == "browser":
logger.warning(
f"[ToolManager] Browser tool is configured but not loaded.\n"
f" To enable browser tool, run:\n"
f" pip install browser-use markdownify playwright\n"
f" playwright install chromium"
)
elif tool_name == "google_search":
logger.warning(
f"[ToolManager] Google Search tool is configured but may need API key.\n"
f" Get API key from: https://serper.dev\n"
f" Configure in config.json: tools.google_search.api_key"
)
else:
logger.warning(f"[ToolManager] Tool '{tool_name}' is configured but could not be loaded.")
except Exception as e:
logger.error(f"Error configuring tools from config: {e}")
def create_tool(self, name: str) -> BaseTool:
tool_class = self.tool_classes.get(name)
if tool_class:
# Create a new instance
tool_instance = tool_class()
# Apply configuration if available
if hasattr(self, 'tool_configs') and name in self.tool_configs:
tool_instance.config = self.tool_configs[name]
return tool_instance
return None
def list_tools(self) -> dict:
result = {}
for name, tool_class in self.tool_classes.items():
# Create a temporary instance to get schema
temp_instance = tool_class()
result[name] = {
"description": temp_instance.description,
"parameters": temp_instance.get_json_schema()
}
return result |
---
+++
@@ -8,9 +8,13 @@
class ToolManager:
+ """
+ Tool manager for managing tools.
+ """
_instance = None
def __new__(cls):
+ """Singleton pattern to ensure only one instance of ToolManager exists."""
if cls._instance is None:
cls._instance = super(ToolManager, cls).__new__(cls)
cls._instance.tool_classes = {} # Store tool classes instead of instances
@@ -23,6 +27,11 @@ self.tool_classes = {} # Dictionary to store tool classes
def load_tools(self, tools_dir: str = "", config_dict=None):
+ """
+ Load tools from both directory and configuration.
+
+ :param tools_dir: Directory to scan for tool modules
+ """
if tools_dir:
self._load_tools_from_directory(tools_dir)
self._configure_tools_from_config()
@@ -31,6 +40,11 @@ self._configure_tools_from_config(config_dict)
def _load_tools_from_init(self) -> bool:
+ """
+ Load tool classes from tools.__init__.__all__
+
+ :return: True if tools were loaded, False otherwise
+ """
try:
# Try to import the tools package
tools_package = importlib.import_module("agent.tools")
@@ -99,6 +113,7 @@ return False
def _load_tools_from_directory(self, tools_dir: str):
+ """Dynamically load tool classes from directory"""
tools_path = Path(tools_dir)
# Traverse all .py files
@@ -159,6 +174,7 @@ print(f"Error importing module {py_file}: {e}")
def _configure_tools_from_config(self, config_dict=None):
+ """Configure tool classes based on configuration file"""
try:
# Get tools configuration
tools_config = config_dict or conf().get("tools", {})
@@ -197,6 +213,12 @@ logger.error(f"Error configuring tools from config: {e}")
def create_tool(self, name: str) -> BaseTool:
+ """
+ Get a new instance of a tool by name.
+
+ :param name: The name of the tool to get.
+ :return: A new instance of the tool or None if not found.
+ """
tool_class = self.tool_classes.get(name)
if tool_class:
# Create a new instance
@@ -210,6 +232,11 @@ return None
def list_tools(self) -> dict:
+ """
+ Get information about all loaded tools.
+
+ :return: A dictionary with tool information.
+ """
result = {}
for name, tool_class in self.tool_classes.items():
# Create a temporary instance to get schema
@@ -218,4 +245,4 @@ "description": temp_instance.description,
"parameters": temp_instance.get_json_schema()
}
- return result
+ return result
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/tools/tool_manager.py |
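A short sketch of driving the ToolManager singleton above; the import path follows the file URL, and the "write" tool name is only illustrative:

```python
# Hedged example: sketch only, assuming agent.tools and its config module are importable.
from agent.tools.tool_manager import ToolManager

manager = ToolManager()      # __new__ always returns the same shared instance
manager.load_tools()         # no tools_dir: falls back to agent.tools.__all__ plus conf()["tools"]
print(manager.list_tools())  # {tool_name: {"description": ..., "parameters": ...}}
tool = manager.create_tool("write")  # fresh instance per call; returns None for unknown names
```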
Create docstrings for each class method |
import os
from typing import Dict, Any
from pathlib import Path
from agent.tools.base_tool import BaseTool, ToolResult
from common.utils import expand_path
class Write(BaseTool):
name: str = "write"
description: str = "Write content to a file. Creates the file if it doesn't exist, overwrites if it does. Automatically creates parent directories. IMPORTANT: Single write should not exceed 10KB. For large files, create a skeleton first, then use edit to add content in chunks."
params: dict = {
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "Path to the file to write (relative or absolute)"
},
"content": {
"type": "string",
"description": "Content to write to the file"
}
},
"required": ["path", "content"]
}
def __init__(self, config: dict = None):
self.config = config or {}
self.cwd = self.config.get("cwd", os.getcwd())
self.memory_manager = self.config.get("memory_manager", None)
def execute(self, args: Dict[str, Any]) -> ToolResult:
path = args.get("path", "").strip()
content = args.get("content", "")
if not path:
return ToolResult.fail("Error: path parameter is required")
# Resolve path
absolute_path = self._resolve_path(path)
try:
# Create parent directory (if needed)
parent_dir = os.path.dirname(absolute_path)
if parent_dir:
os.makedirs(parent_dir, exist_ok=True)
# Write file
with open(absolute_path, 'w', encoding='utf-8') as f:
f.write(content)
# Get bytes written
bytes_written = len(content.encode('utf-8'))
# Auto-sync to memory database if this is a memory file
if self.memory_manager and 'memory/' in path:
self.memory_manager.mark_dirty()
result = {
"message": f"Successfully wrote {bytes_written} bytes to {path}",
"path": path,
"bytes_written": bytes_written
}
return ToolResult.success(result)
except PermissionError:
return ToolResult.fail(f"Error: Permission denied writing to {path}")
except Exception as e:
return ToolResult.fail(f"Error writing file: {str(e)}")
def _resolve_path(self, path: str) -> str:
# Expand ~ to user home directory
path = expand_path(path)
if os.path.isabs(path):
return path
return os.path.abspath(os.path.join(self.cwd, path)) |
---
+++
@@ -1,3 +1,7 @@
+"""
+Write tool - Write file content
+Creates or overwrites files, automatically creates parent directories
+"""
import os
from typing import Dict, Any
@@ -8,6 +12,7 @@
class Write(BaseTool):
+ """Tool for writing file content"""
name: str = "write"
description: str = "Write content to a file. Creates the file if it doesn't exist, overwrites if it does. Automatically creates parent directories. IMPORTANT: Single write should not exceed 10KB. For large files, create a skeleton first, then use edit to add content in chunks."
@@ -33,6 +38,12 @@ self.memory_manager = self.config.get("memory_manager", None)
def execute(self, args: Dict[str, Any]) -> ToolResult:
+ """
+ Execute file write operation
+
+ :param args: Contains file path and content
+ :return: Operation result
+ """
path = args.get("path", "").strip()
content = args.get("content", "")
@@ -73,8 +84,14 @@ return ToolResult.fail(f"Error writing file: {str(e)}")
def _resolve_path(self, path: str) -> str:
+ """
+ Resolve path to absolute path
+
+ :param path: Relative or absolute path
+ :return: Absolute path
+ """
# Expand ~ to user home directory
path = expand_path(path)
if os.path.isabs(path):
return path
- return os.path.abspath(os.path.join(self.cwd, path))
+ return os.path.abspath(os.path.join(self.cwd, path))
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/tools/write/write.py |
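For completeness, a sketch of invoking the Write tool directly rather than through the ToolManager; the working directory, path, and content are made up:

```python
# Hedged example: direct call to the tool above, outside any agent loop.
from agent.tools.write.write import Write

tool = Write(config={"cwd": "/tmp/workspace"})  # cwd is used to resolve relative paths
result = tool.execute({"path": "notes/hello.txt", "content": "hello"})
# On success, the result payload includes the path and bytes_written (5 here).
```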
Add docstrings following best practices |
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from linkai import LinkAIClient, PushMsg
from config import conf, pconf, plugin_config, available_setting, write_plugin_config, get_root
from plugins import PluginManager
import threading
import time
import json
import os
chat_client: LinkAIClient
CHANNEL_ACTIONS = {"channel_create", "channel_update", "channel_delete"}
# channelType -> config key mapping for app credentials
CREDENTIAL_MAP = {
"feishu": ("feishu_app_id", "feishu_app_secret"),
"dingtalk": ("dingtalk_client_id", "dingtalk_client_secret"),
"wecom_bot": ("wecom_bot_id", "wecom_bot_secret"),
"qq": ("qq_app_id", "qq_app_secret"),
"wechatmp": ("wechatmp_app_id", "wechatmp_app_secret"),
"wechatmp_service": ("wechatmp_app_id", "wechatmp_app_secret"),
"wechatcom_app": ("wechatcomapp_agent_id", "wechatcomapp_secret"),
}
class CloudClient(LinkAIClient):
def __init__(self, api_key: str, channel, host: str = ""):
super().__init__(api_key, host)
self.channel = channel
self.client_type = channel.channel_type
self.channel_mgr = None
self._skill_service = None
self._memory_service = None
self._chat_service = None
@property
def skill_service(self):
if self._skill_service is None:
try:
from agent.skills.manager import SkillManager
from agent.skills.service import SkillService
from config import conf
from common.utils import expand_path
workspace_root = expand_path(conf().get("agent_workspace", "~/cow"))
manager = SkillManager(custom_dir=os.path.join(workspace_root, "skills"))
self._skill_service = SkillService(manager)
logger.debug("[CloudClient] SkillService initialised")
except Exception as e:
logger.error(f"[CloudClient] Failed to init SkillService: {e}")
return self._skill_service
@property
def memory_service(self):
if self._memory_service is None:
try:
from agent.memory.service import MemoryService
from config import conf
from common.utils import expand_path
workspace_root = expand_path(conf().get("agent_workspace", "~/cow"))
self._memory_service = MemoryService(workspace_root)
logger.debug("[CloudClient] MemoryService initialised")
except Exception as e:
logger.error(f"[CloudClient] Failed to init MemoryService: {e}")
return self._memory_service
@property
def chat_service(self):
if self._chat_service is None:
try:
from agent.chat.service import ChatService
from bridge.bridge import Bridge
agent_bridge = Bridge().get_agent_bridge()
self._chat_service = ChatService(agent_bridge)
logger.debug("[CloudClient] ChatService initialised")
except Exception as e:
logger.error(f"[CloudClient] Failed to init ChatService: {e}")
return self._chat_service
# ------------------------------------------------------------------
# message push callback
# ------------------------------------------------------------------
def on_message(self, push_msg: PushMsg):
session_id = push_msg.session_id
msg_content = push_msg.msg_content
logger.info(f"receive msg push, session_id={session_id}, msg_content={msg_content}")
context = Context()
context.type = ContextType.TEXT
context["receiver"] = session_id
context["isgroup"] = push_msg.is_group
self.channel.send(Reply(ReplyType.TEXT, content=msg_content), context)
# ------------------------------------------------------------------
# config callback
# ------------------------------------------------------------------
def on_config(self, config: dict):
if not self.client_id:
return
logger.info(f"[CloudClient] Loading remote config: {config}")
action = config.get("action")
if action in CHANNEL_ACTIONS:
self._dispatch_channel_action(action, config.get("data", {}))
return
if config.get("enabled") != "Y":
return
local_config = conf()
need_restart_channel = False
for key in config.keys():
if key in available_setting and config.get(key) is not None:
local_config[key] = config.get(key)
# Voice settings
reply_voice_mode = config.get("reply_voice_mode")
if reply_voice_mode:
if reply_voice_mode == "voice_reply_voice":
local_config["voice_reply_voice"] = True
local_config["always_reply_voice"] = False
elif reply_voice_mode == "always_reply_voice":
local_config["always_reply_voice"] = True
local_config["voice_reply_voice"] = True
elif reply_voice_mode == "no_reply_voice":
local_config["always_reply_voice"] = False
local_config["voice_reply_voice"] = False
# Model configuration
if config.get("model"):
local_config["model"] = config.get("model")
# Channel configuration (legacy single-channel path)
if config.get("channelType"):
if local_config.get("channel_type") != config.get("channelType"):
local_config["channel_type"] = config.get("channelType")
need_restart_channel = True
# Channel-specific app credentials (legacy single-channel path)
current_channel_type = local_config.get("channel_type", "")
if self._set_channel_credentials(local_config, current_channel_type,
config.get("app_id"), config.get("app_secret")):
need_restart_channel = True
if config.get("admin_password"):
if not pconf("Godcmd"):
write_plugin_config({"Godcmd": {"password": config.get("admin_password"), "admin_users": []}})
else:
pconf("Godcmd")["password"] = config.get("admin_password")
PluginManager().instances["GODCMD"].reload()
if config.get("group_app_map") and pconf("linkai"):
local_group_map = {}
for mapping in config.get("group_app_map"):
local_group_map[mapping.get("group_name")] = mapping.get("app_code")
pconf("linkai")["group_app_map"] = local_group_map
PluginManager().instances["LINKAI"].reload()
if config.get("text_to_image") and config.get("text_to_image") == "midjourney" and pconf("linkai"):
if pconf("linkai")["midjourney"]:
pconf("linkai")["midjourney"]["enabled"] = True
pconf("linkai")["midjourney"]["use_image_create_prefix"] = True
elif config.get("text_to_image") and config.get("text_to_image") in ["dall-e-2", "dall-e-3"]:
if pconf("linkai")["midjourney"]:
pconf("linkai")["midjourney"]["use_image_create_prefix"] = False
self._save_config_to_file(local_config)
if need_restart_channel:
self._restart_channel(local_config.get("channel_type", ""))
# ------------------------------------------------------------------
# channel CRUD operations
# ------------------------------------------------------------------
def _dispatch_channel_action(self, action: str, data: dict):
channel_type = data.get("channelType")
if not channel_type:
logger.warning(f"[CloudClient] Channel action '{action}' missing channelType, data={data}")
return
logger.info(f"[CloudClient] Channel action: {action}, channelType={channel_type}")
if action == "channel_create":
self._handle_channel_create(channel_type, data)
elif action == "channel_update":
self._handle_channel_update(channel_type, data)
elif action == "channel_delete":
self._handle_channel_delete(channel_type, data)
def _handle_channel_create(self, channel_type: str, data: dict):
local_config = conf()
cred_changed = self._set_channel_credentials(
local_config, channel_type, data.get("appId"), data.get("appSecret"))
self._add_channel_type(local_config, channel_type)
self._save_config_to_file(local_config)
if not self.channel_mgr:
return
existing_ch = self.channel_mgr.get_channel(channel_type)
if existing_ch and not cred_changed:
logger.info(f"[CloudClient] Channel '{channel_type}' already running with same config, "
"skip restart, reporting status only")
threading.Thread(
target=self._report_channel_startup, args=(channel_type,), daemon=True
).start()
return
threading.Thread(
target=self._do_add_channel, args=(channel_type,), daemon=True
).start()
def _handle_channel_update(self, channel_type: str, data: dict):
local_config = conf()
enabled = data.get("enabled", "Y")
cred_changed = self._set_channel_credentials(
local_config, channel_type, data.get("appId"), data.get("appSecret"))
if enabled == "N":
self._remove_channel_type(local_config, channel_type)
else:
self._add_channel_type(local_config, channel_type)
self._save_config_to_file(local_config)
if not self.channel_mgr:
return
if enabled == "N":
threading.Thread(
target=self._do_remove_channel, args=(channel_type,), daemon=True
).start()
else:
existing_ch = self.channel_mgr.get_channel(channel_type)
if existing_ch and not cred_changed:
logger.info(f"[CloudClient] Channel '{channel_type}' already running with same config, "
"skip restart, reporting status only")
threading.Thread(
target=self._report_channel_startup, args=(channel_type,), daemon=True
).start()
else:
threading.Thread(
target=self._do_restart_channel, args=(self.channel_mgr, channel_type), daemon=True
).start()
def _handle_channel_delete(self, channel_type: str, data: dict):
local_config = conf()
self._clear_channel_credentials(local_config, channel_type)
self._remove_channel_type(local_config, channel_type)
self._save_config_to_file(local_config)
if self.channel_mgr:
threading.Thread(
target=self._do_remove_channel, args=(channel_type,), daemon=True
).start()
# ------------------------------------------------------------------
# channel credentials helpers
# ------------------------------------------------------------------
@staticmethod
def _set_channel_credentials(local_config: dict, channel_type: str,
app_id, app_secret) -> bool:
cred = CREDENTIAL_MAP.get(channel_type)
if not cred:
return False
id_key, secret_key = cred
changed = False
if app_id is not None and local_config.get(id_key) != app_id:
local_config[id_key] = app_id
os.environ[id_key.upper()] = str(app_id)
changed = True
if app_secret is not None and local_config.get(secret_key) != app_secret:
local_config[secret_key] = app_secret
os.environ[secret_key.upper()] = str(app_secret)
changed = True
if changed:
logger.info(f"[CloudClient] Synced {channel_type} credentials to conf and env")
return changed
@staticmethod
def _clear_channel_credentials(local_config: dict, channel_type: str):
cred = CREDENTIAL_MAP.get(channel_type)
if not cred:
return
id_key, secret_key = cred
local_config.pop(id_key, None)
local_config.pop(secret_key, None)
os.environ.pop(id_key.upper(), None)
os.environ.pop(secret_key.upper(), None)
# ------------------------------------------------------------------
# channel_type list helpers
# ------------------------------------------------------------------
@staticmethod
def _parse_channel_types(local_config: dict) -> list:
raw = local_config.get("channel_type", "")
if isinstance(raw, list):
return [ch.strip() for ch in raw if ch.strip()]
if isinstance(raw, str):
return [ch.strip() for ch in raw.split(",") if ch.strip()]
return []
@staticmethod
def _add_channel_type(local_config: dict, channel_type: str):
types = CloudClient._parse_channel_types(local_config)
if channel_type not in types:
types.append(channel_type)
local_config["channel_type"] = ", ".join(types)
@staticmethod
def _remove_channel_type(local_config: dict, channel_type: str):
types = CloudClient._parse_channel_types(local_config)
if channel_type in types:
types.remove(channel_type)
local_config["channel_type"] = ", ".join(types)
# ------------------------------------------------------------------
# channel manager thread helpers
# ------------------------------------------------------------------
def _do_add_channel(self, channel_type: str):
try:
self.channel_mgr.add_channel(channel_type)
logger.info(f"[CloudClient] Channel '{channel_type}' added successfully")
except Exception as e:
logger.error(f"[CloudClient] Failed to add channel '{channel_type}': {e}", exc_info=True)
self.send_channel_status(channel_type, "error", str(e))
return
self._report_channel_startup(channel_type)
def _do_remove_channel(self, channel_type: str):
try:
self.channel_mgr.remove_channel(channel_type)
logger.info(f"[CloudClient] Channel '{channel_type}' removed successfully")
except Exception as e:
logger.error(f"[CloudClient] Failed to remove channel '{channel_type}': {e}")
def _report_channel_startup(self, channel_type: str):
ch = self.channel_mgr.get_channel(channel_type)
if not ch:
self.send_channel_status(channel_type, "error", "channel instance not found")
return
success, error = ch.wait_startup(timeout=3)
if success:
logger.info(f"[CloudClient] Channel '{channel_type}' connected, reporting status")
self.send_channel_status(channel_type, "connected")
else:
logger.warning(f"[CloudClient] Channel '{channel_type}' startup failed: {error}")
self.send_channel_status(channel_type, "error", error)
# ------------------------------------------------------------------
# skill callback
# ------------------------------------------------------------------
def on_skill(self, data: dict) -> dict:
action = data.get("action", "")
payload = data.get("payload")
logger.info(f"[CloudClient] on_skill: action={action}")
svc = self.skill_service
if svc is None:
return {"action": action, "code": 500, "message": "SkillService not available", "payload": None}
return svc.dispatch(action, payload)
# ------------------------------------------------------------------
# memory callback
# ------------------------------------------------------------------
def on_memory(self, data: dict) -> dict:
action = data.get("action", "")
payload = data.get("payload")
logger.info(f"[CloudClient] on_memory: action={action}")
svc = self.memory_service
if svc is None:
return {"action": action, "code": 500, "message": "MemoryService not available", "payload": None}
return svc.dispatch(action, payload)
# ------------------------------------------------------------------
# chat callback
# ------------------------------------------------------------------
def on_chat(self, data: dict, send_chunk_fn):
payload = data.get("payload", {})
query = payload.get("query", "")
session_id = payload.get("session_id", "cloud_console")
channel_type = payload.get("channel_type", "")
if not session_id.startswith("session_"):
session_id = f"session_{session_id}"
logger.info(f"[CloudClient] on_chat: session={session_id}, channel={channel_type}, query={query[:80]}")
svc = self.chat_service
if svc is None:
raise RuntimeError("ChatService not available")
svc.run(query=query, session_id=session_id, channel_type=channel_type, send_chunk_fn=send_chunk_fn)
# ------------------------------------------------------------------
# history callback
# ------------------------------------------------------------------
def on_history(self, data: dict) -> dict:
action = data.get("action", "query")
payload = data.get("payload", {})
logger.info(f"[CloudClient] on_history: action={action}")
if action == "query":
return self._query_history(payload)
return {"action": action, "code": 404, "message": f"unknown action: {action}", "payload": None}
def _query_history(self, payload: dict) -> dict:
session_id = payload.get("session_id", "")
page = int(payload.get("page", 1))
page_size = int(payload.get("page_size", 20))
if not session_id:
return {
"action": "query",
"payload": {"status": "error", "message": "session_id required"},
}
# Web channel stores sessions with a "session_" prefix
if not session_id.startswith("session_"):
session_id = f"session_{session_id}"
logger.info(f"[CloudClient] history query: session={session_id}, page={page}, page_size={page_size}")
try:
from agent.memory.conversation_store import get_conversation_store
store = get_conversation_store()
result = store.load_history_page(
session_id=session_id,
page=page,
page_size=page_size,
)
return {
"action": "query",
"payload": {"status": "success", **result},
}
except Exception as e:
logger.error(f"[CloudClient] History query error: {e}")
return {
"action": "query",
"payload": {"status": "error", "message": str(e)},
}
# ------------------------------------------------------------------
# channel restart helpers
# ------------------------------------------------------------------
def _restart_channel(self, new_channel_type: str):
if self.channel_mgr:
logger.info(f"[CloudClient] Restarting channel to '{new_channel_type}'...")
threading.Thread(target=self._do_restart_channel, args=(self.channel_mgr, new_channel_type), daemon=True).start()
else:
logger.warning("[CloudClient] ChannelManager not available, please restart the application manually")
def _do_restart_channel(self, mgr, new_channel_type: str):
try:
mgr.restart(new_channel_type)
if mgr.channel:
self.channel = mgr.channel
self.client_type = mgr.channel.channel_type
logger.info(f"[CloudClient] Channel reference updated to '{new_channel_type}'")
except Exception as e:
logger.error(f"[CloudClient] Channel restart failed: {e}")
self.send_channel_status(new_channel_type, "error", str(e))
return
self._report_channel_startup(new_channel_type)
# ------------------------------------------------------------------
# config persistence
# ------------------------------------------------------------------
def _save_config_to_file(self, local_config: dict):
try:
config_path = os.path.join(get_root(), "config.json")
if not os.path.exists(config_path):
logger.warning(f"[CloudClient] config.json not found at {config_path}, skip saving")
return
with open(config_path, "r", encoding="utf-8") as f:
file_config = json.load(f)
file_config.update(dict(local_config))
with open(config_path, "w", encoding="utf-8") as f:
json.dump(file_config, f, indent=4, ensure_ascii=False)
logger.info("[CloudClient] Configuration saved to config.json successfully")
except Exception as e:
logger.error(f"[CloudClient] Failed to save configuration to config.json: {e}")
def get_root_domain(host: str = "") -> str:
if not host:
host = os.environ.get("CLOUD_HOST") or conf().get("cloud_host", "")
if not host:
return ""
host = host.strip().rstrip("/")
if "://" in host:
host = host.split("://", 1)[1]
host = host.split("/", 1)[0].split(":")[0]
parts = host.split(".")
if len(parts) >= 2:
return ".".join(parts[-2:])
return host
def get_deployment_id() -> str:
return os.environ.get("CLOUD_DEPLOYMENT_ID") or conf().get("cloud_deployment_id", "")
def get_website_base_url() -> str:
deployment_id = get_deployment_id()
if not deployment_id:
return ""
websites_domain = os.environ.get("CLOUD_WEBSITES_DOMAIN") or conf().get("cloud_websites_domain", "")
if websites_domain:
websites_domain = websites_domain.strip().rstrip("/")
return f"https://{websites_domain}/{deployment_id}"
domain = get_root_domain()
if not domain:
return ""
return f"https://app.{domain}/{deployment_id}"
def build_website_prompt(workspace_dir: str) -> list:
base_url = get_website_base_url()
if not base_url:
return []
return [
"**文件分享与网页生成规则** (非常重要 — 当前为云部署模式):",
"",
f"云端已为工作空间的 `websites/` 目录配置好公网路由映射,访问地址前缀为: `{base_url}`",
"",
"1. **网页/网站**: 编写网页、H5页面等前端代码时,**必须**将文件放到 `websites/` 目录中",
f" - 例如: `websites/index.html` → `{base_url}/index.html`",
f" - 例如: `websites/my-app/index.html` → `{base_url}/my-app/index.html`",
"",
"2. **生成文件分享** (PPT、PDF、图片、音视频等): 当你为用户生成了需要下载或查看的文件时,**可以**将文件保存到 `websites/` 目录中",
f" - 例如: 生成的PPT保存到 `websites/files/report.pptx` → 下载链接为 `{base_url}/files/report.pptx`",
" - 你仍然可以同时使用 `send` 工具发送文件(在飞书、钉钉等IM渠道中有效),但**必须同时在回复文本中提供下载链接**作为兜底,因为部分渠道(如网页端)无法通过 send 接收本地文件",
"",
"3. **必须发送链接**: 无论是网页还是文件,生成后**必须将完整的访问/下载链接直接写在回复文本中发送给用户**",
"",
"4. **文件名和路径尽量使用英文/拼音/数字等**,不要使用中文,避免链接无法访问",
"",
"5. 建议为每个独立项目在 `websites/` 下创建子目录,保持结构清晰",
"",
]
def start(channel, channel_mgr=None):
if not get_deployment_id():
return
global chat_client
chat_client = CloudClient(api_key=conf().get("linkai_api_key"), host=conf().get("cloud_host", ""), channel=channel)
chat_client.channel_mgr = channel_mgr
chat_client.config = _build_config()
chat_client.start()
time.sleep(1.5)
if chat_client.client_id:
logger.info("[CloudClient] Console: https://link-ai.tech/console/clients")
if channel_mgr:
channel_mgr.cloud_mode = True
threading.Thread(target=_report_existing_channels, args=(chat_client, channel_mgr), daemon=True).start()
def _report_existing_channels(client: CloudClient, mgr):
try:
for name, ch in list(mgr._channels.items()):
if name == "web":
continue
ch.cloud_mode = True
client._report_channel_startup(name)
except Exception as e:
logger.warning(f"[CloudClient] Failed to report existing channel status: {e}")
def _build_config():
local_conf = conf()
config = {
"linkai_app_code": local_conf.get("linkai_app_code"),
"single_chat_prefix": local_conf.get("single_chat_prefix"),
"single_chat_reply_prefix": local_conf.get("single_chat_reply_prefix"),
"single_chat_reply_suffix": local_conf.get("single_chat_reply_suffix"),
"group_chat_prefix": local_conf.get("group_chat_prefix"),
"group_chat_reply_prefix": local_conf.get("group_chat_reply_prefix"),
"group_chat_reply_suffix": local_conf.get("group_chat_reply_suffix"),
"group_name_white_list": local_conf.get("group_name_white_list"),
"nick_name_black_list": local_conf.get("nick_name_black_list"),
"speech_recognition": "Y" if local_conf.get("speech_recognition") else "N",
"text_to_image": local_conf.get("text_to_image"),
"image_create_prefix": local_conf.get("image_create_prefix"),
"model": local_conf.get("model"),
"agent_max_context_turns": local_conf.get("agent_max_context_turns"),
"agent_max_context_tokens": local_conf.get("agent_max_context_tokens"),
"agent_max_steps": local_conf.get("agent_max_steps"),
"channelType": local_conf.get("channel_type"),
}
if local_conf.get("always_reply_voice"):
config["reply_voice_mode"] = "always_reply_voice"
elif local_conf.get("voice_reply_voice"):
config["reply_voice_mode"] = "voice_reply_voice"
if pconf("linkai"):
config["group_app_map"] = pconf("linkai").get("group_app_map")
if plugin_config.get("Godcmd"):
config["admin_password"] = plugin_config.get("Godcmd").get("password")
# Add channel-specific app credentials
current_channel_type = local_conf.get("channel_type", "")
if current_channel_type == "feishu":
config["app_id"] = local_conf.get("feishu_app_id")
config["app_secret"] = local_conf.get("feishu_app_secret")
elif current_channel_type == "dingtalk":
config["app_id"] = local_conf.get("dingtalk_client_id")
config["app_secret"] = local_conf.get("dingtalk_client_secret")
elif current_channel_type in ("wechatmp", "wechatmp_service"):
config["app_id"] = local_conf.get("wechatmp_app_id")
config["app_secret"] = local_conf.get("wechatmp_app_secret")
elif current_channel_type == "wecom_bot":
config["app_id"] = local_conf.get("wecom_bot_id")
config["app_secret"] = local_conf.get("wecom_bot_secret")
elif current_channel_type == "qq":
config["app_id"] = local_conf.get("qq_app_id")
config["app_secret"] = local_conf.get("qq_app_secret")
elif current_channel_type == "wechatcom_app":
config["app_id"] = local_conf.get("wechatcomapp_agent_id")
config["app_secret"] = local_conf.get("wechatcomapp_secret")
return config |
---
+++
@@ -1,3 +1,9 @@
+"""
+Cloud management client for connecting to the LinkAI control console.
+
+Handles remote configuration sync, message push, and skill management
+via the LinkAI socket protocol.
+"""
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
@@ -40,6 +46,7 @@
@property
def skill_service(self):
+ """Lazy-init SkillService so it is available once SkillManager exists."""
if self._skill_service is None:
try:
from agent.skills.manager import SkillManager
@@ -56,6 +63,7 @@
@property
def memory_service(self):
+ """Lazy-init MemoryService."""
if self._memory_service is None:
try:
from agent.memory.service import MemoryService
@@ -70,6 +78,7 @@
@property
def chat_service(self):
+ """Lazy-init ChatService (requires AgentBridge via Bridge singleton)."""
if self._chat_service is None:
try:
from agent.chat.service import ChatService
@@ -262,6 +271,12 @@ @staticmethod
def _set_channel_credentials(local_config: dict, channel_type: str,
app_id, app_secret) -> bool:
+ """
+ Write app_id / app_secret into the correct config keys for *channel_type*.
+ Also syncs the values to environment variables (upper-cased key) so that
+ skills that rely on env-based checks (e.g. has_env_var) work immediately.
+ Returns True if any value actually changed.
+ """
cred = CREDENTIAL_MAP.get(channel_type)
if not cred:
return False
@@ -337,6 +352,7 @@ logger.error(f"[CloudClient] Failed to remove channel '{channel_type}': {e}")
def _report_channel_startup(self, channel_type: str):
+ """Wait for channel startup result and report to cloud."""
ch = self.channel_mgr.get_channel(channel_type)
if not ch:
self.send_channel_status(channel_type, "error", "channel instance not found")
@@ -353,6 +369,13 @@ # skill callback
# ------------------------------------------------------------------
def on_skill(self, data: dict) -> dict:
+ """
+ Handle SKILL messages from the cloud console.
+ Delegates to SkillService.dispatch for the actual operations.
+
+ :param data: message data with 'action', 'clientId', 'payload'
+ :return: response dict
+ """
action = data.get("action", "")
payload = data.get("payload")
logger.info(f"[CloudClient] on_skill: action={action}")
@@ -367,6 +390,13 @@ # memory callback
# ------------------------------------------------------------------
def on_memory(self, data: dict) -> dict:
+ """
+ Handle MEMORY messages from the cloud console.
+ Delegates to MemoryService.dispatch for the actual operations.
+
+ :param data: message data with 'action', 'clientId', 'payload'
+ :return: response dict
+ """
action = data.get("action", "")
payload = data.get("payload")
logger.info(f"[CloudClient] on_memory: action={action}")
@@ -381,6 +411,13 @@ # chat callback
# ------------------------------------------------------------------
def on_chat(self, data: dict, send_chunk_fn):
+ """
+ Handle CHAT messages from the cloud console.
+ Runs the agent in streaming mode and sends chunks back via send_chunk_fn.
+
+ :param data: message data with 'action' and 'payload' (query, session_id)
+ :param send_chunk_fn: callable(chunk_data: dict) to send one streaming chunk
+ """
payload = data.get("payload", {})
query = payload.get("query", "")
session_id = payload.get("session_id", "cloud_console")
@@ -399,6 +436,13 @@ # history callback
# ------------------------------------------------------------------
def on_history(self, data: dict) -> dict:
+ """
+ Handle HISTORY messages from the cloud console.
+ Returns paginated conversation history for a session.
+
+ :param data: message data with 'action' and 'payload' (session_id, page, page_size)
+ :return: response dict
+ """
action = data.get("action", "query")
payload = data.get("payload", {})
logger.info(f"[CloudClient] on_history: action={action}")
@@ -409,6 +453,7 @@ return {"action": action, "code": 404, "message": f"unknown action: {action}", "payload": None}
def _query_history(self, payload: dict) -> dict:
+ """Query paginated conversation history using ConversationStore."""
session_id = payload.get("session_id", "")
page = int(payload.get("page", 1))
page_size = int(payload.get("page_size", 20))
@@ -447,6 +492,9 @@ # channel restart helpers
# ------------------------------------------------------------------
def _restart_channel(self, new_channel_type: str):
+ """
+ Restart the channel via ChannelManager when channel type changes.
+ """
if self.channel_mgr:
logger.info(f"[CloudClient] Restarting channel to '{new_channel_type}'...")
threading.Thread(target=self._do_restart_channel, args=(self.channel_mgr, new_channel_type), daemon=True).start()
@@ -454,6 +502,9 @@ logger.warning("[CloudClient] ChannelManager not available, please restart the application manually")
def _do_restart_channel(self, mgr, new_channel_type: str):
+ """
+ Perform the channel restart in a separate thread to avoid blocking the config callback.
+ """
try:
mgr.restart(new_channel_type)
if mgr.channel:
@@ -470,6 +521,9 @@ # config persistence
# ------------------------------------------------------------------
def _save_config_to_file(self, local_config: dict):
+ """
+ Save configuration to config.json file.
+ """
try:
config_path = os.path.join(get_root(), "config.json")
if not os.path.exists(config_path):
@@ -490,6 +544,10 @@
def get_root_domain(host: str = "") -> str:
+ """Extract root domain from a hostname.
+
+ If *host* is empty, reads CLOUD_HOST env var / cloud_host config.
+ """
if not host:
host = os.environ.get("CLOUD_HOST") or conf().get("cloud_host", "")
if not host:
@@ -505,10 +563,15 @@
def get_deployment_id() -> str:
+ """Return cloud deployment id from env var or config."""
return os.environ.get("CLOUD_DEPLOYMENT_ID") or conf().get("cloud_deployment_id", "")
def get_website_base_url() -> str:
+ """Return the public URL prefix that maps to the workspace websites/ dir.
+
+ Returns empty string when cloud deployment is not configured.
+ """
deployment_id = get_deployment_id()
if not deployment_id:
return ""
@@ -525,6 +588,11 @@
def build_website_prompt(workspace_dir: str) -> list:
+ """Build system prompt lines for cloud website/file sharing rules.
+
+ Returns an empty list when cloud deployment is not configured,
+ so callers can safely do ``lines.extend(build_website_prompt(...))``.
+ """
base_url = get_website_base_url()
if not base_url:
return []
@@ -568,6 +636,7 @@
def _report_existing_channels(client: CloudClient, mgr):
+ """Report status for all channels that were started before cloud client connected."""
try:
for name, ch in list(mgr._channels.items()):
if name == "web":
@@ -632,4 +701,4 @@ config["app_id"] = local_conf.get("wechatcomapp_agent_id")
config["app_secret"] = local_conf.get("wechatcomapp_secret")
- return config
+ return config
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/common/cloud_client.py |
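The URL helpers at the end of this module are plain string manipulation, so a small sketch can show their behaviour; the hosts and deployment id below are made up:

```python
# Hedged example: illustrating get_root_domain / get_website_base_url from the module above.
from common.cloud_client import get_root_domain

get_root_domain("https://api.link-ai.tech:443/v1/chat")  # -> "link-ai.tech"
get_root_domain("example.com/path")                      # -> "example.com"
# With cloud_host resolving to link-ai.tech and CLOUD_DEPLOYMENT_ID=abc123 (and no custom
# websites domain), get_website_base_url() would return "https://app.link-ai.tech/abc123".
```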
Add well-formatted docstrings |
import importlib.util
import json
import logging
import os
import ssl
import threading
# -*- coding=utf-8 -*-
import uuid
import requests
import web
from bridge.context import Context
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from channel.chat_channel import ChatChannel, check_prefix
from channel.feishu.feishu_message import FeishuMessage
from common import utils
from common.expired_dict import ExpiredDict
from common.log import logger
from common.singleton import singleton
from config import conf
# Suppress verbose logs from Lark SDK
logging.getLogger("Lark").setLevel(logging.WARNING)
URL_VERIFICATION = "url_verification"
# Lazy-check for lark_oapi SDK availability without importing it at module level.
# The full `import lark_oapi` pulls in 10k+ files and takes 4-10s, so we defer
# the actual import to _startup_websocket() where it is needed.
LARK_SDK_AVAILABLE = importlib.util.find_spec("lark_oapi") is not None
lark = None # will be populated on first use via _ensure_lark_imported()
def _ensure_lark_imported():
global lark
if lark is None:
import lark_oapi as _lark
lark = _lark
return lark
@singleton
class FeiShuChanel(ChatChannel):
feishu_app_id = conf().get('feishu_app_id')
feishu_app_secret = conf().get('feishu_app_secret')
feishu_token = conf().get('feishu_token')
feishu_event_mode = conf().get('feishu_event_mode', 'websocket') # "webhook" or "websocket"
def __init__(self):
super().__init__()
# Cache of received message ids, used for idempotent handling
self.receivedMsgs = ExpiredDict(60 * 60 * 7.1)
self._http_server = None
self._ws_client = None
self._ws_thread = None
self._bot_open_id = None # cached bot open_id for @-mention matching
logger.debug("[FeiShu] app_id={}, app_secret={}, verification_token={}, event_mode={}".format(
self.feishu_app_id, self.feishu_app_secret, self.feishu_token, self.feishu_event_mode))
# No group whitelist check or chat prefix required
conf()["group_name_white_list"] = ["ALL_GROUP"]
conf()["single_chat_prefix"] = [""]
# Validate configuration
if self.feishu_event_mode == 'websocket' and not LARK_SDK_AVAILABLE:
logger.error("[FeiShu] websocket mode requires lark_oapi. Please install: pip install lark-oapi")
raise Exception("lark_oapi not installed")
def startup(self):
self.feishu_app_id = conf().get('feishu_app_id')
self.feishu_app_secret = conf().get('feishu_app_secret')
self.feishu_token = conf().get('feishu_token')
self.feishu_event_mode = conf().get('feishu_event_mode', 'websocket')
self._fetch_bot_open_id()
if self.feishu_event_mode == 'websocket':
self._startup_websocket()
else:
self._startup_webhook()
def _fetch_bot_open_id(self):
try:
access_token = self.fetch_access_token()
if not access_token:
logger.warning("[FeiShu] Cannot fetch bot info: no access_token")
return
headers = {"Authorization": "Bearer " + access_token}
resp = requests.get("https://open.feishu.cn/open-apis/bot/v3/info/", headers=headers, timeout=5)
if resp.status_code == 200:
data = resp.json()
if data.get("code") == 0:
self._bot_open_id = data.get("bot", {}).get("open_id")
logger.info(f"[FeiShu] Bot open_id fetched: {self._bot_open_id}")
else:
logger.warning(f"[FeiShu] Fetch bot info failed: code={data.get('code')}, msg={data.get('msg')}")
except Exception as e:
logger.warning(f"[FeiShu] Fetch bot open_id error: {e}")
def stop(self):
import ctypes
logger.info("[FeiShu] stop() called")
ws_client = self._ws_client
self._ws_client = None
ws_thread = self._ws_thread
self._ws_thread = None
# Interrupt the ws thread first so its blocking start() unblocks
if ws_thread and ws_thread.is_alive():
try:
tid = ws_thread.ident
if tid:
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_ulong(tid), ctypes.py_object(SystemExit)
)
if res == 1:
logger.info("[FeiShu] Interrupted ws thread via ctypes")
elif res > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_ulong(tid), None)
except Exception as e:
logger.warning(f"[FeiShu] Error interrupting ws thread: {e}")
# lark.ws.Client has no stop() method; thread interruption above is sufficient
if self._http_server:
try:
self._http_server.stop()
logger.info("[FeiShu] HTTP server stopped")
except Exception as e:
logger.warning(f"[FeiShu] Error stopping HTTP server: {e}")
self._http_server = None
logger.info("[FeiShu] stop() completed")
def _startup_webhook(self):
logger.debug("[FeiShu] Starting in webhook mode...")
urls = (
'/', 'channel.feishu.feishu_channel.FeishuController'
)
app = web.application(urls, globals(), autoreload=False)
port = conf().get("feishu_port", 9891)
func = web.httpserver.StaticMiddleware(app.wsgifunc())
func = web.httpserver.LogMiddleware(func)
server = web.httpserver.WSGIServer(("0.0.0.0", port), func)
self._http_server = server
try:
server.start()
except (KeyboardInterrupt, SystemExit):
server.stop()
def _startup_websocket(self):
_ensure_lark_imported()
logger.debug("[FeiShu] Starting in websocket mode...")
# Create the event handler
def handle_message_event(data: lark.im.v1.P2ImMessageReceiveV1) -> None:
try:
event_dict = json.loads(lark.JSON.marshal(data))
event = event_dict.get("event", {})
msg = event.get("message", {})
# Skip group messages that don't @-mention the bot (reduce log noise)
if msg.get("chat_type") == "group" and not msg.get("mentions") and msg.get("message_type") == "text":
return
logger.debug(f"[FeiShu] websocket receive event: {lark.JSON.marshal(data, indent=2)}")
# Handle the message
self._handle_message_event(event)
except Exception as e:
logger.error(f"[FeiShu] websocket handle message error: {e}", exc_info=True)
# Build the event dispatcher
event_handler = lark.EventDispatcherHandler.builder("", "") \
.register_p2_im_message_receive_v1(handle_message_event) \
.build()
def start_client_with_retry():
import asyncio
import ssl as ssl_module
original_create_default_context = ssl_module.create_default_context
def create_unverified_context(*args, **kwargs):
context = original_create_default_context(*args, **kwargs)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
return context
# lark_oapi.ws.client captures the event loop at module-import time as a module-
# level global variable. When a previous ws thread is force-killed via ctypes its
# loop may still be marked as "running", which causes the next ws_client.start()
# call (in this new thread) to raise "This event loop is already running".
# Fix: replace the module-level loop with a brand-new, idle loop before starting.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
import lark_oapi.ws.client as _lark_ws_client_mod
_lark_ws_client_mod.loop = loop
except Exception:
pass
startup_error = None
for attempt in range(2):
try:
if attempt == 1:
logger.warning("[FeiShu] Retrying with SSL verification disabled...")
ssl_module.create_default_context = create_unverified_context
ssl_module._create_unverified_context = create_unverified_context
ws_client = lark.ws.Client(
self.feishu_app_id,
self.feishu_app_secret,
event_handler=event_handler,
log_level=lark.LogLevel.WARNING
)
self._ws_client = ws_client
logger.debug("[FeiShu] Websocket client starting...")
ws_client.start()
break
except (SystemExit, KeyboardInterrupt):
logger.info("[FeiShu] Websocket thread received stop signal")
break
except Exception as e:
error_msg = str(e)
is_ssl_error = ("CERTIFICATE_VERIFY_FAILED" in error_msg
or "certificate verify failed" in error_msg.lower())
if is_ssl_error and attempt == 0:
logger.warning(f"[FeiShu] SSL error: {error_msg}, retrying...")
continue
logger.error(f"[FeiShu] Websocket client error: {e}", exc_info=True)
startup_error = error_msg
ssl_module.create_default_context = original_create_default_context
break
if startup_error:
self.report_startup_error(startup_error)
try:
loop.close()
except Exception:
pass
logger.info("[FeiShu] Websocket thread exited")
ws_thread = threading.Thread(target=start_client_with_retry, daemon=True)
self._ws_thread = ws_thread
ws_thread.start()
logger.info("[FeiShu] ✅ Websocket thread started, ready to receive messages")
ws_thread.join()
def _is_mention_bot(self, mentions: list) -> bool:
if self._bot_open_id:
return any(
m.get("id", {}).get("open_id") == self._bot_open_id
for m in mentions
)
bot_name = conf().get("feishu_bot_name")
if bot_name:
return any(m.get("name") == bot_name for m in mentions)
# Feishu event subscription only delivers messages that @-mention the bot,
# so reaching here means the bot was indeed mentioned.
return True
def _handle_message_event(self, event: dict):
if not event.get("message") or not event.get("sender"):
logger.warning(f"[FeiShu] invalid message, event={event}")
return
msg = event.get("message")
# Idempotency check
msg_id = msg.get("message_id")
if self.receivedMsgs.get(msg_id):
logger.warning(f"[FeiShu] repeat msg filtered, msg_id={msg_id}")
return
self.receivedMsgs[msg_id] = True
# Filter out stale messages from before channel startup (offline backlog)
import time as _time
create_time_ms = msg.get("create_time")
if create_time_ms:
msg_age_s = _time.time() - int(create_time_ms) / 1000
if msg_age_s > 60:
logger.warning(f"[FeiShu] stale msg filtered (age={msg_age_s:.0f}s), msg_id={msg_id}")
return
is_group = False
chat_type = msg.get("chat_type")
if chat_type == "group":
if not msg.get("mentions") and msg.get("message_type") == "text":
# In group chats, ignore messages that don't @-mention the bot
return
if msg.get("mentions") and msg.get("message_type") == "text":
if not self._is_mention_bot(msg.get("mentions")):
return
# Group chat
is_group = True
receive_id_type = "chat_id"
elif chat_type == "p2p":
receive_id_type = "open_id"
else:
logger.warning("[FeiShu] message ignore")
return
# Build the Feishu message object
feishu_msg = FeishuMessage(event, is_group=is_group, access_token=self.fetch_access_token())
if not feishu_msg:
return
# Handle file-caching logic
from channel.file_cache import get_file_cache
file_cache = get_file_cache()
# Get session_id (used to associate cached files)
if is_group:
if conf().get("group_shared_session", True):
session_id = msg.get("chat_id") # 群共享会话
else:
session_id = feishu_msg.from_user_id + "_" + msg.get("chat_id")
else:
session_id = feishu_msg.from_user_id
# If this is a single image message, cache it
if feishu_msg.ctype == ContextType.IMAGE:
if hasattr(feishu_msg, 'image_path') and feishu_msg.image_path:
file_cache.add(session_id, feishu_msg.image_path, file_type='image')
logger.info(f"[FeiShu] Image cached for session {session_id}, waiting for user query...")
# Don't process a lone image immediately; wait for the user's question
return
# For text messages, check whether there are cached files
if feishu_msg.ctype == ContextType.TEXT:
cached_files = file_cache.get(session_id)
if cached_files:
# Attach the cached files to the text message
file_refs = []
for file_info in cached_files:
file_path = file_info['path']
file_type = file_info['type']
if file_type == 'image':
file_refs.append(f"[图片: {file_path}]")
elif file_type == 'video':
file_refs.append(f"[视频: {file_path}]")
else:
file_refs.append(f"[文件: {file_path}]")
feishu_msg.content = feishu_msg.content + "\n" + "\n".join(file_refs)
logger.info(f"[FeiShu] Attached {len(cached_files)} cached file(s) to user query")
# Clear the cache
file_cache.clear(session_id)
context = self._compose_context(
feishu_msg.ctype,
feishu_msg.content,
isgroup=is_group,
msg=feishu_msg,
receive_id_type=receive_id_type,
no_need_at=True
)
if context:
self.produce(context)
logger.debug(f"[FeiShu] query={feishu_msg.content}, type={feishu_msg.ctype}")
def send(self, reply: Reply, context: Context):
msg = context.get("msg")
is_group = context["isgroup"]
if msg:
access_token = msg.access_token
else:
access_token = self.fetch_access_token()
headers = {
"Authorization": "Bearer " + access_token,
"Content-Type": "application/json",
}
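# Defaults for a plain text reply; specific reply types below override msg_type / content_key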
msg_type = "text"
logger.debug(f"[FeiShu] sending reply, type={context.type}, content={reply.content[:100]}...")
reply_content = reply.content
content_key = "text"
if reply.type == ReplyType.IMAGE_URL:
# Image upload
reply_content = self._upload_image_url(reply.content, access_token)
if not reply_content:
logger.warning("[FeiShu] upload image failed")
return
msg_type = "image"
content_key = "image_key"
elif reply.type == ReplyType.FILE:
# If there is accompanying text content, send the text first
if hasattr(reply, 'text_content') and reply.text_content:
logger.info(f"[FeiShu] Sending text before file: {reply.text_content[:50]}...")
text_reply = Reply(ReplyType.TEXT, reply.text_content)
self._send(text_reply, context)
import time
time.sleep(0.3) # brief delay so the text arrives first
# Determine whether this is a video file
file_path = reply.content
if file_path.startswith("file://"):
file_path = file_path[7:]
is_video = file_path.lower().endswith(('.mp4', '.avi', '.mov', '.wmv', '.flv'))
if is_video:
# Video upload (includes duration info)
upload_data = self._upload_video_url(reply.content, access_token)
if not upload_data or not upload_data.get('file_key'):
logger.warning("[FeiShu] upload video failed")
return
# Videos use the "media" msg_type (per the official docs)
# Error code 230055: mp4 uploads must use msg_type="media"
msg_type = "media"
reply_content = upload_data # full upload response data (includes file_key and duration)
logger.info(
f"[FeiShu] Sending video: file_key={upload_data.get('file_key')}, duration={upload_data.get('duration')}ms")
content_key = None # serialize the whole object directly
else:
# Other files use the "file" msg_type
file_key = self._upload_file_url(reply.content, access_token)
if not file_key:
logger.warning("[FeiShu] upload file failed")
return
reply_content = file_key
msg_type = "file"
content_key = "file_key"
# Check if we can reply to an existing message (need msg_id)
can_reply = is_group and msg and hasattr(msg, 'msg_id') and msg.msg_id
# Build content JSON
content_json = json.dumps(reply_content) if content_key is None else json.dumps({content_key: reply_content})
logger.debug(f"[FeiShu] Sending message: msg_type={msg_type}, content={content_json[:200]}")
if can_reply:
# Reply to an existing message in a group chat
url = f"https://open.feishu.cn/open-apis/im/v1/messages/{msg.msg_id}/reply"
data = {
"msg_type": msg_type,
"content": content_json
}
res = requests.post(url=url, headers=headers, json=data, timeout=(5, 10))
else:
# Send a new message (private chat, or group chat without a msg_id, e.g. scheduled tasks)
url = "https://open.feishu.cn/open-apis/im/v1/messages"
params = {"receive_id_type": context.get("receive_id_type") or "open_id"}
data = {
"receive_id": context.get("receiver"),
"msg_type": msg_type,
"content": content_json
}
res = requests.post(url=url, headers=headers, params=params, json=data, timeout=(5, 10))
res = res.json()
if res.get("code") == 0:
logger.info(f"[FeiShu] send message success")
else:
logger.error(f"[FeiShu] send message failed, code={res.get('code')}, msg={res.get('msg')}")
def fetch_access_token(self) -> str:
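# Exchange app_id / app_secret for a tenant_access_token via Feishu's internal-app auth endpoint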
url = "https://open.feishu.cn/open-apis/auth/v3/tenant_access_token/internal/"
headers = {
"Content-Type": "application/json"
}
req_body = {
"app_id": self.feishu_app_id,
"app_secret": self.feishu_app_secret
}
data = bytes(json.dumps(req_body), encoding='utf8')
response = requests.post(url=url, data=data, headers=headers)
if response.status_code == 200:
res = response.json()
if res.get("code") != 0:
logger.error(f"[FeiShu] get tenant_access_token error, code={res.get('code')}, msg={res.get('msg')}")
return ""
else:
return res.get("tenant_access_token")
else:
logger.error(f"[FeiShu] fetch token error, res={response}")
def _upload_image_url(self, img_url, access_token):
logger.debug(f"[FeiShu] start process image, img_url={img_url}")
# Check if it's a local file path (file:// protocol)
if img_url.startswith("file://"):
local_path = img_url[7:] # Remove "file://" prefix
logger.info(f"[FeiShu] uploading local file: {local_path}")
if not os.path.exists(local_path):
logger.error(f"[FeiShu] local file not found: {local_path}")
return None
# Upload directly from local file
upload_url = "https://open.feishu.cn/open-apis/im/v1/images"
data = {'image_type': 'message'}
headers = {'Authorization': f'Bearer {access_token}'}
with open(local_path, "rb") as file:
upload_response = requests.post(upload_url, files={"image": file}, data=data, headers=headers)
logger.info(f"[FeiShu] upload file, res={upload_response.content}")
response_data = upload_response.json()
if response_data.get("code") == 0:
return response_data.get("data").get("image_key")
else:
logger.error(f"[FeiShu] upload failed: {response_data}")
return None
# Original logic for HTTP URLs
response = requests.get(img_url)
suffix = utils.get_path_suffix(img_url)
temp_name = str(uuid.uuid4()) + "." + suffix
if response.status_code == 200:
# Save the image content to a temporary file
with open(temp_name, "wb") as file:
file.write(response.content)
# upload
upload_url = "https://open.feishu.cn/open-apis/im/v1/images"
data = {
'image_type': 'message'
}
headers = {
'Authorization': f'Bearer {access_token}',
}
with open(temp_name, "rb") as file:
upload_response = requests.post(upload_url, files={"image": file}, data=data, headers=headers)
logger.info(f"[FeiShu] upload file, res={upload_response.content}")
os.remove(temp_name)
return upload_response.json().get("data").get("image_key")
def _get_video_duration(self, file_path: str) -> int:
try:
import subprocess
# Use ffprobe to get the video duration
cmd = [
'ffprobe',
'-v', 'error',
'-show_entries', 'format=duration',
'-of', 'default=noprint_wrappers=1:nokey=1',
file_path
]
result = subprocess.run(cmd, capture_output=True, text=True, timeout=10)
if result.returncode == 0:
duration_seconds = float(result.stdout.strip())
duration_ms = int(duration_seconds * 1000)
logger.info(f"[FeiShu] Video duration: {duration_seconds:.2f}s ({duration_ms}ms)")
return duration_ms
else:
logger.warning(f"[FeiShu] Failed to get video duration via ffprobe: {result.stderr}")
return 0
except FileNotFoundError:
logger.warning("[FeiShu] ffprobe not found, video duration will be 0. Install ffmpeg to fix this.")
return 0
except Exception as e:
logger.warning(f"[FeiShu] Failed to get video duration: {e}")
return 0
def _upload_video_url(self, video_url, access_token):
local_path = None
temp_file = None
try:
# For file:// URLs (local files), upload directly
if video_url.startswith("file://"):
local_path = video_url[7:] # Remove file:// prefix
if not os.path.exists(local_path):
logger.error(f"[FeiShu] local video file not found: {local_path}")
return None
else:
# For HTTP URLs, download first
logger.info(f"[FeiShu] Downloading video from URL: {video_url}")
response = requests.get(video_url, timeout=(5, 60))
if response.status_code != 200:
logger.error(f"[FeiShu] download video failed, status={response.status_code}")
return None
# Save to temp file
import uuid
file_name = os.path.basename(video_url) or "video.mp4"
temp_file = str(uuid.uuid4()) + "_" + file_name
with open(temp_file, "wb") as file:
file.write(response.content)
logger.info(f"[FeiShu] Video downloaded, size={len(response.content)} bytes")
local_path = temp_file
# Get video duration
duration = self._get_video_duration(local_path)
# Upload to Feishu
file_name = os.path.basename(local_path)
file_ext = os.path.splitext(file_name)[1].lower()
file_type_map = {'.mp4': 'mp4'}
file_type = file_type_map.get(file_ext, 'mp4')
upload_url = "https://open.feishu.cn/open-apis/im/v1/files"
data = {
'file_type': file_type,
'file_name': file_name
}
# Add duration only if available (required for video/audio)
if duration:
data['duration'] = duration # Must be int, not string
headers = {'Authorization': f'Bearer {access_token}'}
logger.info(f"[FeiShu] Uploading video: file_name={file_name}, duration={duration}ms")
with open(local_path, "rb") as file:
upload_response = requests.post(
upload_url,
files={"file": file},
data=data,
headers=headers,
timeout=(5, 60)
)
logger.info(
f"[FeiShu] upload video response, status={upload_response.status_code}, res={upload_response.content}")
response_data = upload_response.json()
if response_data.get("code") == 0:
# Add duration to the response data (API doesn't return it)
upload_data = response_data.get("data")
upload_data['duration'] = duration # Add our calculated duration
logger.info(
f"[FeiShu] Upload complete: file_key={upload_data.get('file_key')}, duration={duration}ms")
return upload_data
else:
logger.error(f"[FeiShu] upload video failed: {response_data}")
return None
except Exception as e:
logger.error(f"[FeiShu] upload video exception: {e}")
return None
finally:
# Clean up temp file
if temp_file and os.path.exists(temp_file):
try:
os.remove(temp_file)
except Exception as e:
logger.warning(f"[FeiShu] Failed to remove temp file {temp_file}: {e}")
def _upload_file_url(self, file_url, access_token):
logger.debug(f"[FeiShu] start process file, file_url={file_url}")
# Check if it's a local file path (file:// protocol)
if file_url.startswith("file://"):
local_path = file_url[7:] # Remove "file://" prefix
logger.info(f"[FeiShu] uploading local file: {local_path}")
if not os.path.exists(local_path):
logger.error(f"[FeiShu] local file not found: {local_path}")
return None
# Get file info
file_name = os.path.basename(local_path)
file_ext = os.path.splitext(file_name)[1].lower()
# Determine file type for Feishu API
# Feishu supports: opus, mp4, pdf, doc, xls, ppt, stream (other types)
file_type_map = {
'.opus': 'opus',
'.mp4': 'mp4',
'.pdf': 'pdf',
'.doc': 'doc', '.docx': 'doc',
'.xls': 'xls', '.xlsx': 'xls',
'.ppt': 'ppt', '.pptx': 'ppt',
}
file_type = file_type_map.get(file_ext, 'stream') # Default to stream for other types
# Upload file to Feishu
upload_url = "https://open.feishu.cn/open-apis/im/v1/files"
data = {'file_type': file_type, 'file_name': file_name}
headers = {'Authorization': f'Bearer {access_token}'}
try:
with open(local_path, "rb") as file:
upload_response = requests.post(
upload_url,
files={"file": file},
data=data,
headers=headers,
timeout=(5, 30) # 5s connect, 30s read timeout
)
logger.info(
f"[FeiShu] upload file response, status={upload_response.status_code}, res={upload_response.content}")
response_data = upload_response.json()
if response_data.get("code") == 0:
return response_data.get("data").get("file_key")
else:
logger.error(f"[FeiShu] upload file failed: {response_data}")
return None
except Exception as e:
logger.error(f"[FeiShu] upload file exception: {e}")
return None
# For HTTP URLs, download first then upload
try:
response = requests.get(file_url, timeout=(5, 30))
if response.status_code != 200:
logger.error(f"[FeiShu] download file failed, status={response.status_code}")
return None
# Save to temp file
import uuid
file_name = os.path.basename(file_url)
temp_name = str(uuid.uuid4()) + "_" + file_name
with open(temp_name, "wb") as file:
file.write(response.content)
# Upload
file_ext = os.path.splitext(file_name)[1].lower()
file_type_map = {
'.opus': 'opus', '.mp4': 'mp4', '.pdf': 'pdf',
'.doc': 'doc', '.docx': 'doc',
'.xls': 'xls', '.xlsx': 'xls',
'.ppt': 'ppt', '.pptx': 'ppt',
}
file_type = file_type_map.get(file_ext, 'stream')
upload_url = "https://open.feishu.cn/open-apis/im/v1/files"
data = {'file_type': file_type, 'file_name': file_name}
headers = {'Authorization': f'Bearer {access_token}'}
with open(temp_name, "rb") as file:
upload_response = requests.post(upload_url, files={"file": file}, data=data, headers=headers)
logger.info(f"[FeiShu] upload file, res={upload_response.content}")
response_data = upload_response.json()
os.remove(temp_name) # Clean up temp file
if response_data.get("code") == 0:
return response_data.get("data").get("file_key")
else:
logger.error(f"[FeiShu] upload file failed: {response_data}")
return None
except Exception as e:
logger.error(f"[FeiShu] upload file from URL exception: {e}")
return None
def _compose_context(self, ctype: ContextType, content, **kwargs):
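# Wrap the raw message into a Context and fill in channel / session metadata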
context = Context(ctype, content)
context.kwargs = kwargs
if "channel_type" not in context:
context["channel_type"] = self.channel_type
if "origin_ctype" not in context:
context["origin_ctype"] = ctype
cmsg = context["msg"]
# Set session_id based on chat type
if cmsg.is_group:
# Group chat: check if group_shared_session is enabled
if conf().get("group_shared_session", True):
# All users in the group share the same session context
context["session_id"] = cmsg.other_user_id # group_id
else:
# Each user has their own session within the group
# This ensures:
# - Same user in different groups have separate conversation histories
# - Same user in private chat and group chat have separate histories
context["session_id"] = f"{cmsg.from_user_id}:{cmsg.other_user_id}"
else:
# Private chat: use user_id only
context["session_id"] = cmsg.from_user_id
context["receiver"] = cmsg.other_user_id
if ctype == ContextType.TEXT:
# 1. Text request
# Image generation handling
img_match_prefix = check_prefix(content, conf().get("image_create_prefix"))
if img_match_prefix:
content = content.replace(img_match_prefix, "", 1)
context.type = ContextType.IMAGE_CREATE
else:
context.type = ContextType.TEXT
context.content = content.strip()
elif context.type == ContextType.VOICE:
# 2. Voice request
if "desire_rtype" not in context and conf().get("voice_reply_voice"):
context["desire_rtype"] = ReplyType.VOICE
return context
class FeishuController:
# Class constants
FAILED_MSG = '{"success": false}'
SUCCESS_MSG = '{"success": true}'
MESSAGE_RECEIVE_TYPE = "im.message.receive_v1"
def GET(self):
return "Feishu service start success!"
def POST(self):
try:
channel = FeiShuChanel()
request = json.loads(web.data().decode("utf-8"))
logger.debug(f"[FeiShu] receive request: {request}")
# 1. Event subscription callback verification
if request.get("type") == URL_VERIFICATION:
verify_res = {"challenge": request.get("challenge")}
return json.dumps(verify_res)
# 2. Message receive handling
# Token validation
header = request.get("header")
if not header or header.get("token") != channel.feishu_token:
return self.FAILED_MSG
# Handle the message event
event = request.get("event")
if header.get("event_type") == self.MESSAGE_RECEIVE_TYPE and event:
channel._handle_message_event(event)
return self.SUCCESS_MSG
except Exception as e:
logger.error(e)
return self.FAILED_MSG | --- +++ @@ -1,3 +1,15 @@+"""
+Feishu channel integration
+
+Supports two event receiving modes:
+1. webhook mode: receive events via an HTTP server (requires a public IP)
+2. websocket mode: receive events via a long-lived connection (friendly for local development)
+
+Select the mode via the config option feishu_event_mode: "webhook" or "websocket"
+
+@author Saboteur7
+@Date 2023/11/19
+"""
import importlib.util
import json
@@ -35,6 +47,7 @@
def _ensure_lark_imported():
+ """Import lark_oapi on first use (takes 4-10s due to 10k+ source files)."""
global lark
if lark is None:
import lark_oapi as _lark
@@ -80,6 +93,7 @@ self._startup_webhook()
def _fetch_bot_open_id(self):
+ """Fetch the bot's own open_id via API so we can match @-mentions without feishu_bot_name."""
try:
access_token = self.fetch_access_token()
if not access_token:
@@ -129,6 +143,7 @@ logger.info("[FeiShu] stop() completed")
def _startup_webhook(self):
+ """启动HTTP服务器接收事件(webhook模式)"""
logger.debug("[FeiShu] Starting in webhook mode...")
urls = (
'/', 'channel.feishu.feishu_channel.FeishuController'
@@ -145,11 +160,13 @@ server.stop()
def _startup_websocket(self):
+ """启动长连接接收事件(websocket模式)"""
_ensure_lark_imported()
logger.debug("[FeiShu] Starting in websocket mode...")
# Create the event handler
def handle_message_event(data: lark.im.v1.P2ImMessageReceiveV1) -> None:
+ """处理接收消息事件 v2.0"""
try:
event_dict = json.loads(lark.JSON.marshal(data))
event = event_dict.get("event", {})
@@ -173,6 +190,7 @@ .build()
def start_client_with_retry():
+ """Run ws client in this thread with its own event loop to avoid conflicts."""
import asyncio
import ssl as ssl_module
original_create_default_context = ssl_module.create_default_context
@@ -244,6 +262,14 @@ ws_thread.join()
def _is_mention_bot(self, mentions: list) -> bool:
+ """Check whether any mention in the list refers to this bot.
+
+ Priority:
+ 1. Match by open_id (obtained from /bot/v3/info at startup, no config needed)
+ 2. Fallback to feishu_bot_name config for backward compatibility
+ 3. If neither is available, assume the bot was mentioned (Feishu only
+ delivers group messages that @-mention the bot, so this is usually correct)
+ """
if self._bot_open_id:
return any(
m.get("id", {}).get("open_id") == self._bot_open_id
@@ -257,6 +283,10 @@ return True
def _handle_message_event(self, event: dict):
+ """
+ Core logic for handling message events
+ Shared by both webhook and websocket modes
+ """
if not event.get("message") or not event.get("sender"):
logger.warning(f"[FeiShu] invalid message, event={event}")
return
@@ -524,6 +554,15 @@ return upload_response.json().get("data").get("image_key")
def _get_video_duration(self, file_path: str) -> int:
+ """
+ Get the video duration (in milliseconds)
+
+ Args:
+ file_path: path to the video file
+
+ Returns:
+ Video duration in milliseconds, or 0 if it could not be determined
+ """
try:
import subprocess
@@ -553,6 +592,15 @@ return 0
def _upload_video_url(self, video_url, access_token):
+ """
+ Upload video to Feishu and return video info (file_key and duration)
+ Supports:
+ - file:// URLs for local files
+ - http(s):// URLs (download then upload)
+
+ Returns:
+ dict with 'file_key' and 'duration' (milliseconds), or None if failed
+ """
local_path = None
temp_file = None
@@ -640,6 +688,10 @@ logger.warning(f"[FeiShu] Failed to remove temp file {temp_file}: {e}")
def _upload_file_url(self, file_url, access_token):
+ """
+ Upload file to Feishu
+ Supports both local files (file://) and HTTP URLs
+ """
logger.debug(f"[FeiShu] start process file, file_url={file_url}")
# Check if it's a local file path (file:// protocol)
@@ -787,6 +839,9 @@
class FeishuController:
+ """
+ HTTP server controller, used for webhook mode
+ """
# 类常量
FAILED_MSG = '{"success": false}'
SUCCESS_MSG = '{"success": true}'
@@ -822,4 +877,4 @@
except Exception as e:
logger.error(e)
- return self.FAILED_MSG+ return self.FAILED_MSG
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/channel/feishu/feishu_channel.py |
Add docstrings that explain logic |
import os
import shutil
import zipfile
import tempfile
from typing import Dict, List, Optional
from common.log import logger
from agent.skills.types import Skill, SkillEntry
from agent.skills.manager import SkillManager
try:
import requests
except ImportError:
requests = None
class SkillService:
def __init__(self, skill_manager: SkillManager):
self.manager = skill_manager
# ------------------------------------------------------------------
# query
# ------------------------------------------------------------------
def query(self) -> List[dict]:
self.manager.refresh_skills()
config = self.manager.get_skills_config()
result = list(config.values())
logger.info(f"[SkillService] query: {len(result)} skills found")
return result
# ------------------------------------------------------------------
# add / install
# ------------------------------------------------------------------
def add(self, payload: dict) -> None:
name = payload.get("name")
if not name:
raise ValueError("skill name is required")
payload_type = payload.get("type", "url")
if payload_type == "package":
self._add_package(name, payload)
else:
self._add_url(name, payload)
self.manager.refresh_skills()
category = payload.get("category")
if category and name in self.manager.skills_config:
self.manager.skills_config[name]["category"] = category
self.manager._save_skills_config()
def _add_url(self, name: str, payload: dict) -> None:
files = payload.get("files", [])
if not files:
raise ValueError("skill files list is empty")
skill_dir = os.path.join(self.manager.custom_dir, name)
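# Download into a temporary directory first so a failed download never clobbers an existing install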
tmp_dir = skill_dir + ".tmp"
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
os.makedirs(tmp_dir, exist_ok=True)
try:
for file_info in files:
url = file_info.get("url")
rel_path = file_info.get("path")
if not url or not rel_path:
logger.warning(f"[SkillService] add: skip invalid file entry {file_info}")
continue
dest = os.path.join(tmp_dir, rel_path)
self._download_file(url, dest)
except Exception:
shutil.rmtree(tmp_dir, ignore_errors=True)
raise
if os.path.exists(skill_dir):
shutil.rmtree(skill_dir)
os.rename(tmp_dir, skill_dir)
logger.info(f"[SkillService] add: skill '{name}' installed via url ({len(files)} files)")
def _add_package(self, name: str, payload: dict) -> None:
files = payload.get("files", [])
if not files or not files[0].get("url"):
raise ValueError("package url is required")
url = files[0]["url"]
skill_dir = os.path.join(self.manager.custom_dir, name)
with tempfile.TemporaryDirectory() as tmp_dir:
zip_path = os.path.join(tmp_dir, "package.zip")
self._download_file(url, zip_path)
if not zipfile.is_zipfile(zip_path):
raise ValueError(f"downloaded file is not a valid zip archive: {url}")
extract_dir = os.path.join(tmp_dir, "extracted")
with zipfile.ZipFile(zip_path, "r") as zf:
zf.extractall(extract_dir)
# Determine the actual content root.
# If the zip has a single top-level directory, use its contents
# so the skill folder is clean (no extra nesting).
top_items = [
item for item in os.listdir(extract_dir)
if not item.startswith(".")
]
if len(top_items) == 1:
single = os.path.join(extract_dir, top_items[0])
if os.path.isdir(single):
extract_dir = single
if os.path.exists(skill_dir):
shutil.rmtree(skill_dir)
shutil.copytree(extract_dir, skill_dir)
logger.info(f"[SkillService] add: skill '{name}' installed via package ({url})")
# ------------------------------------------------------------------
# open / close (enable / disable)
# ------------------------------------------------------------------
def open(self, payload: dict) -> None:
name = payload.get("name")
if not name:
raise ValueError("skill name is required")
self.manager.set_skill_enabled(name, enabled=True)
logger.info(f"[SkillService] open: skill '{name}' enabled")
def close(self, payload: dict) -> None:
name = payload.get("name")
if not name:
raise ValueError("skill name is required")
self.manager.set_skill_enabled(name, enabled=False)
logger.info(f"[SkillService] close: skill '{name}' disabled")
# ------------------------------------------------------------------
# delete
# ------------------------------------------------------------------
def delete(self, payload: dict) -> None:
name = payload.get("name")
if not name:
raise ValueError("skill name is required")
skill_dir = os.path.join(self.manager.custom_dir, name)
if os.path.exists(skill_dir):
shutil.rmtree(skill_dir)
logger.info(f"[SkillService] delete: removed directory {skill_dir}")
else:
logger.warning(f"[SkillService] delete: skill directory not found: {skill_dir}")
# Refresh will remove the deleted skill from config automatically
self.manager.refresh_skills()
logger.info(f"[SkillService] delete: skill '{name}' deleted")
# ------------------------------------------------------------------
# dispatch - single entry point for protocol messages
# ------------------------------------------------------------------
def dispatch(self, action: str, payload: Optional[dict] = None) -> dict:
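# Route the protocol action to the matching handler and wrap the outcome in an {action, code, message, payload} dict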
payload = payload or {}
try:
if action == "query":
result_payload = self.query()
return {"action": action, "code": 200, "message": "success", "payload": result_payload}
elif action == "add":
self.add(payload)
elif action == "open":
self.open(payload)
elif action == "close":
self.close(payload)
elif action == "delete":
self.delete(payload)
else:
return {"action": action, "code": 400, "message": f"unknown action: {action}", "payload": None}
return {"action": action, "code": 200, "message": "success", "payload": None}
except Exception as e:
logger.error(f"[SkillService] dispatch error: action={action}, error={e}")
return {"action": action, "code": 500, "message": str(e), "payload": None}
# ------------------------------------------------------------------
# internal helpers
# ------------------------------------------------------------------
@staticmethod
def _download_file(url: str, dest: str):
if requests is None:
raise RuntimeError("requests library is required for downloading skill files")
dest_dir = os.path.dirname(dest)
if dest_dir:
os.makedirs(dest_dir, exist_ok=True)
resp = requests.get(url, timeout=60)
resp.raise_for_status()
with open(dest, "wb") as f:
f.write(resp.content)
logger.debug(f"[SkillService] downloaded {url} -> {dest}") | --- +++ @@ -1,3 +1,10 @@+"""
+Skill service for handling skill CRUD operations.
+
+This service provides a unified interface for managing skills, which can be
+called from the cloud control client (LinkAI), the local web console, or any
+other management entry point.
+"""
import os
import shutil
@@ -15,14 +22,28 @@
class SkillService:
+ """
+ High-level service for skill lifecycle management.
+ Wraps SkillManager and provides network-aware operations such as
+ downloading skill files from remote URLs.
+ """
def __init__(self, skill_manager: SkillManager):
+ """
+ :param skill_manager: The SkillManager instance to operate on
+ """
self.manager = skill_manager
# ------------------------------------------------------------------
# query
# ------------------------------------------------------------------
def query(self) -> List[dict]:
+ """
+ Query all skills and return a serialisable list.
+ Reads from skills_config.json (refreshes from disk if needed).
+
+ :return: list of skill info dicts
+ """
self.manager.refresh_skills()
config = self.manager.get_skills_config()
result = list(config.values())
@@ -33,6 +54,35 @@ # add / install
# ------------------------------------------------------------------
def add(self, payload: dict) -> None:
+ """
+ Add (install) a skill from a remote payload.
+
+ Supported payload types:
+
+ 1. ``type: "url"`` – download individual files::
+
+ {
+ "name": "web_search",
+ "type": "url",
+ "enabled": true,
+ "files": [
+ {"url": "https://...", "path": "README.md"},
+ {"url": "https://...", "path": "scripts/main.py"}
+ ]
+ }
+
+ 2. ``type: "package"`` – download a zip archive and extract::
+
+ {
+ "name": "plugin-custom-tool",
+ "type": "package",
+ "category": "skills",
+ "enabled": true,
+ "files": [{"url": "https://cdn.example.com/skills/custom-tool.zip"}]
+ }
+
+ :param payload: skill add payload from server
+ """
name = payload.get("name")
if not name:
raise ValueError("skill name is required")
@@ -52,6 +102,7 @@ self.manager._save_skills_config()
def _add_url(self, name: str, payload: dict) -> None:
+ """Install a skill by downloading individual files."""
files = payload.get("files", [])
if not files:
raise ValueError("skill files list is empty")
@@ -83,6 +134,13 @@ logger.info(f"[SkillService] add: skill '{name}' installed via url ({len(files)} files)")
def _add_package(self, name: str, payload: dict) -> None:
+ """
+ Install a skill by downloading a zip archive and extracting it.
+
+ If the archive contains a single top-level directory, that directory
+ is used as the skill folder directly; otherwise a new directory named
+ after the skill is created to hold the extracted contents.
+ """
files = payload.get("files", [])
if not files or not files[0].get("url"):
raise ValueError("package url is required")
@@ -123,6 +181,11 @@ # open / close (enable / disable)
# ------------------------------------------------------------------
def open(self, payload: dict) -> None:
+ """
+ Enable a skill by name.
+
+ :param payload: {"name": "skill_name"}
+ """
name = payload.get("name")
if not name:
raise ValueError("skill name is required")
@@ -130,6 +193,11 @@ logger.info(f"[SkillService] open: skill '{name}' enabled")
def close(self, payload: dict) -> None:
+ """
+ Disable a skill by name.
+
+ :param payload: {"name": "skill_name"}
+ """
name = payload.get("name")
if not name:
raise ValueError("skill name is required")
@@ -140,6 +208,11 @@ # delete
# ------------------------------------------------------------------
def delete(self, payload: dict) -> None:
+ """
+ Delete a skill by removing its directory entirely.
+
+ :param payload: {"name": "skill_name"}
+ """
name = payload.get("name")
if not name:
raise ValueError("skill name is required")
@@ -159,6 +232,14 @@ # dispatch - single entry point for protocol messages
# ------------------------------------------------------------------
def dispatch(self, action: str, payload: Optional[dict] = None) -> dict:
+ """
+ Dispatch a skill management action and return a protocol-compatible
+ response dict.
+
+ :param action: one of query / add / open / close / delete
+ :param payload: action-specific payload (may be None for query)
+ :return: dict with action, code, message, payload
+ """
payload = payload or {}
try:
if action == "query":
@@ -184,6 +265,12 @@ # ------------------------------------------------------------------
@staticmethod
def _download_file(url: str, dest: str):
+ """
+ Download a file from *url* and save to *dest*.
+
+ :param url: remote file URL
+ :param dest: local destination path
+ """
if requests is None:
raise RuntimeError("requests library is required for downloading skill files")
@@ -195,4 +282,4 @@ resp.raise_for_status()
with open(dest, "wb") as f:
f.write(resp.content)
- logger.debug(f"[SkillService] downloaded {url} -> {dest}")+ logger.debug(f"[SkillService] downloaded {url} -> {dest}")
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/skills/service.py |
Create structured documentation for my script |
import json
import os
import threading
from datetime import datetime
from typing import Dict, List, Optional
from pathlib import Path
from common.utils import expand_path
class TaskStore:
def __init__(self, store_path: str = None):
if store_path is None:
# Default to ~/cow/scheduler/tasks.json
home = expand_path("~")
store_path = os.path.join(home, "cow", "scheduler", "tasks.json")
self.store_path = store_path
self.lock = threading.Lock()
self._ensure_store_dir()
def _ensure_store_dir(self):
store_dir = os.path.dirname(self.store_path)
os.makedirs(store_dir, exist_ok=True)
def load_tasks(self) -> Dict[str, dict]:
with self.lock:
if not os.path.exists(self.store_path):
return {}
try:
with open(self.store_path, 'r', encoding='utf-8') as f:
data = json.load(f)
return data.get("tasks", {})
except Exception as e:
print(f"Error loading tasks: {e}")
return {}
def save_tasks(self, tasks: Dict[str, dict]):
with self.lock:
try:
# Create backup
if os.path.exists(self.store_path):
backup_path = f"{self.store_path}.bak"
try:
with open(self.store_path, 'r') as src:
with open(backup_path, 'w') as dst:
dst.write(src.read())
except Exception:
pass
# Save tasks
data = {
"version": 1,
"updated_at": datetime.now().isoformat(),
"tasks": tasks
}
with open(self.store_path, 'w', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=2)
except Exception as e:
print(f"Error saving tasks: {e}")
raise
def add_task(self, task: dict) -> bool:
tasks = self.load_tasks()
task_id = task.get("id")
if not task_id:
raise ValueError("Task must have an 'id' field")
if task_id in tasks:
raise ValueError(f"Task with id '{task_id}' already exists")
tasks[task_id] = task
self.save_tasks(tasks)
return True
def update_task(self, task_id: str, updates: dict) -> bool:
tasks = self.load_tasks()
if task_id not in tasks:
raise ValueError(f"Task '{task_id}' not found")
# Update fields
tasks[task_id].update(updates)
tasks[task_id]["updated_at"] = datetime.now().isoformat()
self.save_tasks(tasks)
return True
def delete_task(self, task_id: str) -> bool:
tasks = self.load_tasks()
if task_id not in tasks:
raise ValueError(f"Task '{task_id}' not found")
del tasks[task_id]
self.save_tasks(tasks)
return True
def get_task(self, task_id: str) -> Optional[dict]:
tasks = self.load_tasks()
return tasks.get(task_id)
def list_tasks(self, enabled_only: bool = False) -> List[dict]:
tasks = self.load_tasks()
task_list = list(tasks.values())
if enabled_only:
task_list = [t for t in task_list if t.get("enabled", True)]
# Sort by next_run_at
task_list.sort(key=lambda t: t.get("next_run_at", float('inf')))
return task_list
def enable_task(self, task_id: str, enabled: bool = True) -> bool:
return self.update_task(task_id, {"enabled": enabled}) | --- +++ @@ -1,3 +1,6 @@+"""
+Task storage management for scheduler
+"""
import json
import os
@@ -9,8 +12,17 @@
class TaskStore:
+ """
+ Manages persistent storage of scheduled tasks
+ """
def __init__(self, store_path: str = None):
+ """
+ Initialize task store
+
+ Args:
+ store_path: Path to tasks.json file. Defaults to ~/cow/scheduler/tasks.json
+ """
if store_path is None:
# Default to ~/cow/scheduler/tasks.json
home = expand_path("~")
@@ -21,10 +33,17 @@ self._ensure_store_dir()
def _ensure_store_dir(self):
+ """Ensure the storage directory exists"""
store_dir = os.path.dirname(self.store_path)
os.makedirs(store_dir, exist_ok=True)
def load_tasks(self) -> Dict[str, dict]:
+ """
+ Load all tasks from storage
+
+ Returns:
+ Dictionary of task_id -> task_data
+ """
with self.lock:
if not os.path.exists(self.store_path):
return {}
@@ -38,6 +57,12 @@ return {}
def save_tasks(self, tasks: Dict[str, dict]):
+ """
+ Save all tasks to storage
+
+ Args:
+ tasks: Dictionary of task_id -> task_data
+ """
with self.lock:
try:
# Create backup
@@ -64,6 +89,15 @@ raise
def add_task(self, task: dict) -> bool:
+ """
+ Add a new task
+
+ Args:
+ task: Task data dictionary
+
+ Returns:
+ True if successful
+ """
tasks = self.load_tasks()
task_id = task.get("id")
@@ -78,6 +112,16 @@ return True
def update_task(self, task_id: str, updates: dict) -> bool:
+ """
+ Update an existing task
+
+ Args:
+ task_id: Task ID
+ updates: Dictionary of fields to update
+
+ Returns:
+ True if successful
+ """
tasks = self.load_tasks()
if task_id not in tasks:
@@ -91,6 +135,15 @@ return True
def delete_task(self, task_id: str) -> bool:
+ """
+ Delete a task
+
+ Args:
+ task_id: Task ID
+
+ Returns:
+ True if successful
+ """
tasks = self.load_tasks()
if task_id not in tasks:
@@ -101,10 +154,28 @@ return True
def get_task(self, task_id: str) -> Optional[dict]:
+ """
+ Get a specific task
+
+ Args:
+ task_id: Task ID
+
+ Returns:
+ Task data or None if not found
+ """
tasks = self.load_tasks()
return tasks.get(task_id)
def list_tasks(self, enabled_only: bool = False) -> List[dict]:
+ """
+ List all tasks
+
+ Args:
+ enabled_only: If True, only return enabled tasks
+
+ Returns:
+ List of task dictionaries
+ """
tasks = self.load_tasks()
task_list = list(tasks.values())
@@ -117,4 +188,14 @@ return task_list
def enable_task(self, task_id: str, enabled: bool = True) -> bool:
- return self.update_task(task_id, {"enabled": enabled})+ """
+ Enable or disable a task
+
+ Args:
+ task_id: Task ID
+ enabled: True to enable, False to disable
+
+ Returns:
+ True if successful
+ """
+ return self.update_task(task_id, {"enabled": enabled})
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/tools/scheduler/task_store.py |
Document my Python code with docstrings |
from __future__ import annotations
import sqlite3
import json
import hashlib
from typing import List, Dict, Optional, Any
from pathlib import Path
from dataclasses import dataclass
@dataclass
class MemoryChunk:
id: str
user_id: Optional[str]
scope: str # "shared" | "user" | "session"
source: str # "memory" | "session"
path: str
start_line: int
end_line: int
text: str
embedding: Optional[List[float]]
hash: str
metadata: Optional[Dict[str, Any]] = None
@dataclass
class SearchResult:
path: str
start_line: int
end_line: int
score: float
snippet: str
source: str
user_id: Optional[str] = None
class MemoryStorage:
def __init__(self, db_path: Path):
self.db_path = db_path
self.conn: Optional[sqlite3.Connection] = None
self.fts5_available = False # Track FTS5 availability
self._init_db()
def _check_fts5_support(self) -> bool:
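# Probe FTS5 support by creating and immediately dropping a throwaway virtual table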
try:
self.conn.execute("CREATE VIRTUAL TABLE IF NOT EXISTS fts5_test USING fts5(test)")
self.conn.execute("DROP TABLE IF EXISTS fts5_test")
return True
except sqlite3.OperationalError as e:
if "no such module: fts5" in str(e):
return False
raise
def _init_db(self):
try:
self.conn = sqlite3.connect(str(self.db_path), check_same_thread=False)
self.conn.row_factory = sqlite3.Row
# Check FTS5 support
self.fts5_available = self._check_fts5_support()
if not self.fts5_available:
from common.log import logger
logger.debug("[MemoryStorage] FTS5 not available, using LIKE-based keyword search")
# Check database integrity
try:
result = self.conn.execute("PRAGMA integrity_check").fetchone()
if result[0] != 'ok':
print(f"⚠️ Database integrity check failed: {result[0]}")
print(f" Recreating database...")
self.conn.close()
self.conn = None
# Remove corrupted database
self.db_path.unlink(missing_ok=True)
# Remove WAL files
Path(str(self.db_path) + '-wal').unlink(missing_ok=True)
Path(str(self.db_path) + '-shm').unlink(missing_ok=True)
# Reconnect to create new database
self.conn = sqlite3.connect(str(self.db_path), check_same_thread=False)
self.conn.row_factory = sqlite3.Row
except sqlite3.DatabaseError:
# Database is corrupted, recreate it
print(f"⚠️ Database is corrupted, recreating...")
if self.conn:
self.conn.close()
self.conn = None
self.db_path.unlink(missing_ok=True)
Path(str(self.db_path) + '-wal').unlink(missing_ok=True)
Path(str(self.db_path) + '-shm').unlink(missing_ok=True)
self.conn = sqlite3.connect(str(self.db_path), check_same_thread=False)
self.conn.row_factory = sqlite3.Row
# Enable WAL mode for better concurrency
self.conn.execute("PRAGMA journal_mode=WAL")
# Set busy timeout to avoid "database is locked" errors
self.conn.execute("PRAGMA busy_timeout=5000")
except Exception as e:
print(f"⚠️ Unexpected error during database initialization: {e}")
raise
# Create chunks table with embeddings
self.conn.execute("""
CREATE TABLE IF NOT EXISTS chunks (
id TEXT PRIMARY KEY,
user_id TEXT,
scope TEXT NOT NULL DEFAULT 'shared',
source TEXT NOT NULL DEFAULT 'memory',
path TEXT NOT NULL,
start_line INTEGER NOT NULL,
end_line INTEGER NOT NULL,
text TEXT NOT NULL,
embedding TEXT,
hash TEXT NOT NULL,
metadata TEXT,
created_at INTEGER DEFAULT (strftime('%s', 'now')),
updated_at INTEGER DEFAULT (strftime('%s', 'now'))
)
""")
# Create indexes
self.conn.execute("""
CREATE INDEX IF NOT EXISTS idx_chunks_user
ON chunks(user_id)
""")
self.conn.execute("""
CREATE INDEX IF NOT EXISTS idx_chunks_scope
ON chunks(scope)
""")
self.conn.execute("""
CREATE INDEX IF NOT EXISTS idx_chunks_hash
ON chunks(path, hash)
""")
# Create FTS5 virtual table for keyword search (only if supported)
if self.fts5_available:
# Use default unicode61 tokenizer (stable and compatible)
# For CJK support, we'll use LIKE queries as fallback
self.conn.execute("""
CREATE VIRTUAL TABLE IF NOT EXISTS chunks_fts USING fts5(
text,
id UNINDEXED,
user_id UNINDEXED,
path UNINDEXED,
source UNINDEXED,
scope UNINDEXED,
content='chunks',
content_rowid='rowid'
)
""")
# Create triggers to keep FTS in sync
self.conn.execute("""
CREATE TRIGGER IF NOT EXISTS chunks_ai AFTER INSERT ON chunks BEGIN
INSERT INTO chunks_fts(rowid, text, id, user_id, path, source, scope)
VALUES (new.rowid, new.text, new.id, new.user_id, new.path, new.source, new.scope);
END
""")
self.conn.execute("""
CREATE TRIGGER IF NOT EXISTS chunks_ad AFTER DELETE ON chunks BEGIN
DELETE FROM chunks_fts WHERE rowid = old.rowid;
END
""")
self.conn.execute("""
CREATE TRIGGER IF NOT EXISTS chunks_au AFTER UPDATE ON chunks BEGIN
UPDATE chunks_fts SET text = new.text, id = new.id,
user_id = new.user_id, path = new.path, source = new.source, scope = new.scope
WHERE rowid = new.rowid;
END
""")
# Create files metadata table
self.conn.execute("""
CREATE TABLE IF NOT EXISTS files (
path TEXT PRIMARY KEY,
source TEXT NOT NULL DEFAULT 'memory',
hash TEXT NOT NULL,
mtime INTEGER NOT NULL,
size INTEGER NOT NULL,
updated_at INTEGER DEFAULT (strftime('%s', 'now'))
)
""")
self.conn.commit()
def save_chunk(self, chunk: MemoryChunk):
self.conn.execute("""
INSERT OR REPLACE INTO chunks
(id, user_id, scope, source, path, start_line, end_line, text, embedding, hash, metadata, updated_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, strftime('%s', 'now'))
""", (
chunk.id,
chunk.user_id,
chunk.scope,
chunk.source,
chunk.path,
chunk.start_line,
chunk.end_line,
chunk.text,
json.dumps(chunk.embedding) if chunk.embedding else None,
chunk.hash,
json.dumps(chunk.metadata) if chunk.metadata else None
))
self.conn.commit()
def save_chunks_batch(self, chunks: List[MemoryChunk]):
self.conn.executemany("""
INSERT OR REPLACE INTO chunks
(id, user_id, scope, source, path, start_line, end_line, text, embedding, hash, metadata, updated_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, strftime('%s', 'now'))
""", [
(
c.id, c.user_id, c.scope, c.source, c.path,
c.start_line, c.end_line, c.text,
json.dumps(c.embedding) if c.embedding else None,
c.hash,
json.dumps(c.metadata) if c.metadata else None
)
for c in chunks
])
self.conn.commit()
def get_chunk(self, chunk_id: str) -> Optional[MemoryChunk]:
row = self.conn.execute("""
SELECT * FROM chunks WHERE id = ?
""", (chunk_id,)).fetchone()
if not row:
return None
return self._row_to_chunk(row)
def search_vector(
self,
query_embedding: List[float],
user_id: Optional[str] = None,
scopes: List[str] = None,
limit: int = 10
) -> List[SearchResult]:
if scopes is None:
scopes = ["shared"]
if user_id:
scopes.append("user")
# Build query
scope_placeholders = ','.join('?' * len(scopes))
params = scopes
if user_id:
query = f"""
SELECT * FROM chunks
WHERE scope IN ({scope_placeholders})
AND (scope = 'shared' OR user_id = ?)
AND embedding IS NOT NULL
"""
params.append(user_id)
else:
query = f"""
SELECT * FROM chunks
WHERE scope IN ({scope_placeholders})
AND embedding IS NOT NULL
"""
rows = self.conn.execute(query, params).fetchall()
# Calculate cosine similarity
results = []
for row in rows:
embedding = json.loads(row['embedding'])
similarity = self._cosine_similarity(query_embedding, embedding)
if similarity > 0:
results.append((similarity, row))
# Sort by similarity and limit
results.sort(key=lambda x: x[0], reverse=True)
results = results[:limit]
return [
SearchResult(
path=row['path'],
start_line=row['start_line'],
end_line=row['end_line'],
score=score,
snippet=self._truncate_text(row['text'], 500),
source=row['source'],
user_id=row['user_id']
)
for score, row in results
]
def search_keyword(
self,
query: str,
user_id: Optional[str] = None,
scopes: List[str] = None,
limit: int = 10
) -> List[SearchResult]:
if scopes is None:
scopes = ["shared"]
if user_id:
scopes.append("user")
# Try FTS5 search first (if available)
if self.fts5_available:
fts_results = self._search_fts5(query, user_id, scopes, limit)
if fts_results:
return fts_results
# Fallback to LIKE search (always for CJK, or if FTS5 not available)
if not self.fts5_available or MemoryStorage._contains_cjk(query):
return self._search_like(query, user_id, scopes, limit)
return []
def _search_fts5(
self,
query: str,
user_id: Optional[str],
scopes: List[str],
limit: int
) -> List[SearchResult]:
fts_query = self._build_fts_query(query)
if not fts_query:
return []
scope_placeholders = ','.join('?' * len(scopes))
params = [fts_query] + scopes
if user_id:
sql_query = f"""
SELECT chunks.*, bm25(chunks_fts) as rank
FROM chunks_fts
JOIN chunks ON chunks.id = chunks_fts.id
WHERE chunks_fts MATCH ?
AND chunks.scope IN ({scope_placeholders})
AND (chunks.scope = 'shared' OR chunks.user_id = ?)
ORDER BY rank
LIMIT ?
"""
params.extend([user_id, limit])
else:
sql_query = f"""
SELECT chunks.*, bm25(chunks_fts) as rank
FROM chunks_fts
JOIN chunks ON chunks.id = chunks_fts.id
WHERE chunks_fts MATCH ?
AND chunks.scope IN ({scope_placeholders})
ORDER BY rank
LIMIT ?
"""
params.append(limit)
try:
rows = self.conn.execute(sql_query, params).fetchall()
return [
SearchResult(
path=row['path'],
start_line=row['start_line'],
end_line=row['end_line'],
score=self._bm25_rank_to_score(row['rank']),
snippet=self._truncate_text(row['text'], 500),
source=row['source'],
user_id=row['user_id']
)
for row in rows
]
except Exception:
return []
def _search_like(
self,
query: str,
user_id: Optional[str],
scopes: List[str],
limit: int
) -> List[SearchResult]:
import re
# Extract CJK words (2+ characters)
cjk_words = re.findall(r'[\u4e00-\u9fff]{2,}', query)
if not cjk_words:
return []
scope_placeholders = ','.join('?' * len(scopes))
# Build LIKE conditions for each word
like_conditions = []
params = []
for word in cjk_words:
like_conditions.append("text LIKE ?")
params.append(f'%{word}%')
where_clause = ' OR '.join(like_conditions)
params.extend(scopes)
if user_id:
sql_query = f"""
SELECT * FROM chunks
WHERE ({where_clause})
AND scope IN ({scope_placeholders})
AND (scope = 'shared' OR user_id = ?)
LIMIT ?
"""
params.extend([user_id, limit])
else:
sql_query = f"""
SELECT * FROM chunks
WHERE ({where_clause})
AND scope IN ({scope_placeholders})
LIMIT ?
"""
params.append(limit)
try:
rows = self.conn.execute(sql_query, params).fetchall()
return [
SearchResult(
path=row['path'],
start_line=row['start_line'],
end_line=row['end_line'],
score=0.5, # Fixed score for LIKE search
snippet=self._truncate_text(row['text'], 500),
source=row['source'],
user_id=row['user_id']
)
for row in rows
]
except Exception:
return []
def delete_by_path(self, path: str):
self.conn.execute("""
DELETE FROM chunks WHERE path = ?
""", (path,))
self.conn.commit()
def get_file_hash(self, path: str) -> Optional[str]:
row = self.conn.execute("""
SELECT hash FROM files WHERE path = ?
""", (path,)).fetchone()
return row['hash'] if row else None
def update_file_metadata(self, path: str, source: str, file_hash: str, mtime: int, size: int):
self.conn.execute("""
INSERT OR REPLACE INTO files (path, source, hash, mtime, size, updated_at)
VALUES (?, ?, ?, ?, ?, strftime('%s', 'now'))
""", (path, source, file_hash, mtime, size))
self.conn.commit()
def get_stats(self) -> Dict[str, int]:
chunks_count = self.conn.execute("""
SELECT COUNT(*) as cnt FROM chunks
""").fetchone()['cnt']
files_count = self.conn.execute("""
SELECT COUNT(*) as cnt FROM files
""").fetchone()['cnt']
return {
'chunks': chunks_count,
'files': files_count
}
def close(self):
if self.conn:
try:
self.conn.commit() # Ensure all changes are committed
self.conn.close()
self.conn = None # Mark as closed
except Exception as e:
print(f"⚠️ Error closing database connection: {e}")
def __del__(self):
try:
self.close()
except Exception:
pass # Ignore errors during cleanup
# Helper methods
def _row_to_chunk(self, row) -> MemoryChunk:
return MemoryChunk(
id=row['id'],
user_id=row['user_id'],
scope=row['scope'],
source=row['source'],
path=row['path'],
start_line=row['start_line'],
end_line=row['end_line'],
text=row['text'],
embedding=json.loads(row['embedding']) if row['embedding'] else None,
hash=row['hash'],
metadata=json.loads(row['metadata']) if row['metadata'] else None
)
@staticmethod
def _cosine_similarity(vec1: List[float], vec2: List[float]) -> float:
if len(vec1) != len(vec2):
return 0.0
dot_product = sum(a * b for a, b in zip(vec1, vec2))
norm1 = sum(a * a for a in vec1) ** 0.5
norm2 = sum(b * b for b in vec2) ** 0.5
if norm1 == 0 or norm2 == 0:
return 0.0
return dot_product / (norm1 * norm2)
@staticmethod
def _contains_cjk(text: str) -> bool:
import re
return bool(re.search(r'[\u4e00-\u9fff]', text))
@staticmethod
def _build_fts_query(raw_query: str) -> Optional[str]:
import re
# Extract words (primarily English words and numbers)
tokens = re.findall(r'[A-Za-z0-9_]+', raw_query)
if not tokens:
return None
# Quote tokens for exact matching
quoted = [f'"{t}"' for t in tokens]
# Use OR for more flexible matching
return ' OR '.join(quoted)
@staticmethod
def _bm25_rank_to_score(rank: float) -> float:
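# Clamp the rank to be non-negative (None counts as a very poor match) and map it into (0, 1]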
normalized = max(0, rank) if rank is not None else 999
return 1 / (1 + normalized)
@staticmethod
def _truncate_text(text: str, max_chars: int) -> str:
if len(text) <= max_chars:
return text
return text[:max_chars] + "..."
@staticmethod
def compute_hash(content: str) -> str:
return hashlib.sha256(content.encode('utf-8')).hexdigest() | --- +++ @@ -1,3 +1,8 @@+"""
+Storage layer for memory using SQLite + FTS5
+
+Provides vector and keyword search capabilities
+"""
from __future__ import annotations
import sqlite3
@@ -10,6 +15,7 @@
@dataclass
class MemoryChunk:
+ """Represents a memory chunk with text and embedding"""
id: str
user_id: Optional[str]
scope: str # "shared" | "user" | "session"
@@ -25,6 +31,7 @@
@dataclass
class SearchResult:
+ """Search result with score and snippet"""
path: str
start_line: int
end_line: int
@@ -35,6 +42,7 @@
class MemoryStorage:
+ """SQLite-based storage with FTS5 for keyword search"""
def __init__(self, db_path: Path):
self.db_path = db_path
@@ -43,6 +51,7 @@ self._init_db()
def _check_fts5_support(self) -> bool:
+ """Check if SQLite has FTS5 support"""
try:
self.conn.execute("CREATE VIRTUAL TABLE IF NOT EXISTS fts5_test USING fts5(test)")
self.conn.execute("DROP TABLE IF EXISTS fts5_test")
@@ -53,6 +62,7 @@ raise
def _init_db(self):
+ """Initialize database with schema"""
try:
self.conn = sqlite3.connect(str(self.db_path), check_same_thread=False)
self.conn.row_factory = sqlite3.Row
@@ -188,6 +198,7 @@ self.conn.commit()
def save_chunk(self, chunk: MemoryChunk):
+ """Save a memory chunk"""
self.conn.execute("""
INSERT OR REPLACE INTO chunks
(id, user_id, scope, source, path, start_line, end_line, text, embedding, hash, metadata, updated_at)
@@ -208,6 +219,7 @@ self.conn.commit()
def save_chunks_batch(self, chunks: List[MemoryChunk]):
+ """Save multiple chunks in a batch"""
self.conn.executemany("""
INSERT OR REPLACE INTO chunks
(id, user_id, scope, source, path, start_line, end_line, text, embedding, hash, metadata, updated_at)
@@ -225,6 +237,7 @@ self.conn.commit()
def get_chunk(self, chunk_id: str) -> Optional[MemoryChunk]:
+ """Get a chunk by ID"""
row = self.conn.execute("""
SELECT * FROM chunks WHERE id = ?
""", (chunk_id,)).fetchone()
@@ -241,6 +254,10 @@ scopes: List[str] = None,
limit: int = 10
) -> List[SearchResult]:
+ """
+ Vector similarity search using in-memory cosine similarity
+ (sqlite-vec can be added later for better performance)
+ """
if scopes is None:
scopes = ["shared"]
if user_id:
@@ -300,6 +317,13 @@ scopes: List[str] = None,
limit: int = 10
) -> List[SearchResult]:
+ """
+ Keyword search using FTS5 + LIKE fallback
+
+ Strategy:
+ 1. If FTS5 available: Try FTS5 search first (good for English and word-based languages)
+ 2. If no FTS5 or no results and query contains CJK: Use LIKE search
+ """
if scopes is None:
scopes = ["shared"]
if user_id:
@@ -324,6 +348,7 @@ scopes: List[str],
limit: int
) -> List[SearchResult]:
+ """FTS5 full-text search"""
fts_query = self._build_fts_query(query)
if not fts_query:
return []
@@ -379,6 +404,7 @@ scopes: List[str],
limit: int
) -> List[SearchResult]:
+ """LIKE-based search for CJK characters"""
import re
# Extract CJK words (2+ characters)
cjk_words = re.findall(r'[\u4e00-\u9fff]{2,}', query)
@@ -433,18 +459,21 @@ return []
def delete_by_path(self, path: str):
+ """Delete all chunks from a file"""
self.conn.execute("""
DELETE FROM chunks WHERE path = ?
""", (path,))
self.conn.commit()
def get_file_hash(self, path: str) -> Optional[str]:
+ """Get stored file hash"""
row = self.conn.execute("""
SELECT hash FROM files WHERE path = ?
""", (path,)).fetchone()
return row['hash'] if row else None
def update_file_metadata(self, path: str, source: str, file_hash: str, mtime: int, size: int):
+ """Update file metadata"""
self.conn.execute("""
INSERT OR REPLACE INTO files (path, source, hash, mtime, size, updated_at)
VALUES (?, ?, ?, ?, ?, strftime('%s', 'now'))
@@ -452,6 +481,7 @@ self.conn.commit()
def get_stats(self) -> Dict[str, int]:
+ """Get storage statistics"""
chunks_count = self.conn.execute("""
SELECT COUNT(*) as cnt FROM chunks
""").fetchone()['cnt']
@@ -466,6 +496,7 @@ }
def close(self):
+ """Close database connection"""
if self.conn:
try:
self.conn.commit() # Ensure all changes are committed
@@ -475,6 +506,7 @@ print(f"⚠️ Error closing database connection: {e}")
def __del__(self):
+ """Destructor to ensure connection is closed"""
try:
self.close()
except Exception:
@@ -483,6 +515,7 @@ # Helper methods
def _row_to_chunk(self, row) -> MemoryChunk:
+ """Convert database row to MemoryChunk"""
return MemoryChunk(
id=row['id'],
user_id=row['user_id'],
@@ -499,6 +532,7 @@
@staticmethod
def _cosine_similarity(vec1: List[float], vec2: List[float]) -> float:
+ """Calculate cosine similarity between two vectors"""
if len(vec1) != len(vec2):
return 0.0
@@ -513,11 +547,18 @@
@staticmethod
def _contains_cjk(text: str) -> bool:
+ """Check if text contains CJK (Chinese/Japanese/Korean) characters"""
import re
return bool(re.search(r'[\u4e00-\u9fff]', text))
@staticmethod
def _build_fts_query(raw_query: str) -> Optional[str]:
+ """
+ Build FTS5 query from raw text
+
+ Works best for English and word-based languages.
+ For CJK characters, LIKE search will be used as fallback.
+ """
import re
# Extract words (primarily English words and numbers)
tokens = re.findall(r'[A-Za-z0-9_]+', raw_query)
@@ -531,15 +572,18 @@
@staticmethod
def _bm25_rank_to_score(rank: float) -> float:
+ """Convert BM25 rank to 0-1 score"""
normalized = max(0, rank) if rank is not None else 999
return 1 / (1 + normalized)
@staticmethod
def _truncate_text(text: str, max_chars: int) -> str:
+ """Truncate text to max characters"""
if len(text) <= max_chars:
return text
return text[:max_chars] + "..."
@staticmethod
def compute_hash(content: str) -> str:
- return hashlib.sha256(content.encode('utf-8')).hexdigest()+ """Compute SHA256 hash of content"""
+ return hashlib.sha256(content.encode('utf-8')).hexdigest()
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/memory/storage.py |
Create documentation for each function signature |
import os
from pathlib import Path
from typing import List, Optional, Dict
from common.log import logger
from agent.skills.types import Skill, SkillEntry, LoadSkillsResult, SkillMetadata
from agent.skills.frontmatter import parse_frontmatter, parse_metadata, parse_boolean_value, get_frontmatter_value
class SkillLoader:
def __init__(self):
pass
def load_skills_from_dir(self, dir_path: str, source: str) -> LoadSkillsResult:
skills = []
diagnostics = []
if not os.path.exists(dir_path):
diagnostics.append(f"Directory does not exist: {dir_path}")
return LoadSkillsResult(skills=skills, diagnostics=diagnostics)
if not os.path.isdir(dir_path):
diagnostics.append(f"Path is not a directory: {dir_path}")
return LoadSkillsResult(skills=skills, diagnostics=diagnostics)
# Load skills from root-level .md files and subdirectories
result = self._load_skills_recursive(dir_path, source, include_root_files=True)
return result
def _load_skills_recursive(
self,
dir_path: str,
source: str,
include_root_files: bool = False
) -> LoadSkillsResult:
skills = []
diagnostics = []
try:
entries = os.listdir(dir_path)
except Exception as e:
diagnostics.append(f"Failed to list directory {dir_path}: {e}")
return LoadSkillsResult(skills=skills, diagnostics=diagnostics)
for entry in entries:
# Skip hidden files and directories
if entry.startswith('.'):
continue
# Skip common non-skill directories
if entry in ('node_modules', '__pycache__', 'venv', '.git'):
continue
full_path = os.path.join(dir_path, entry)
# Handle directories
if os.path.isdir(full_path):
# Recursively scan subdirectories
sub_result = self._load_skills_recursive(full_path, source, include_root_files=False)
skills.extend(sub_result.skills)
diagnostics.extend(sub_result.diagnostics)
continue
# Handle files
if not os.path.isfile(full_path):
continue
# Check if this is a skill file
is_root_md = include_root_files and entry.endswith('.md') and entry.upper() != 'README.MD'
is_skill_md = not include_root_files and entry == 'SKILL.md'
if not (is_root_md or is_skill_md):
continue
# Load the skill
skill_result = self._load_skill_from_file(full_path, source)
if skill_result.skills:
skills.extend(skill_result.skills)
diagnostics.extend(skill_result.diagnostics)
return LoadSkillsResult(skills=skills, diagnostics=diagnostics)
def _load_skill_from_file(self, file_path: str, source: str) -> LoadSkillsResult:
diagnostics = []
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
except Exception as e:
diagnostics.append(f"Failed to read skill file {file_path}: {e}")
return LoadSkillsResult(skills=[], diagnostics=diagnostics)
# Parse frontmatter
frontmatter = parse_frontmatter(content)
# Get skill name and description
skill_dir = os.path.dirname(file_path)
parent_dir_name = os.path.basename(skill_dir)
name = frontmatter.get('name', parent_dir_name)
description = frontmatter.get('description', '')
# Normalize name (handle both string and list)
if isinstance(name, list):
name = name[0] if name else parent_dir_name
elif not isinstance(name, str):
name = str(name) if name else parent_dir_name
# Normalize description (handle both string and list)
if isinstance(description, list):
description = ' '.join(str(d) for d in description if d)
elif not isinstance(description, str):
description = str(description) if description else ''
# Special handling for linkai-agent: dynamically load apps from config.json
if name == 'linkai-agent':
description = self._load_linkai_agent_description(skill_dir, description)
if not description or not description.strip():
diagnostics.append(f"Skill {name} has no description: {file_path}")
return LoadSkillsResult(skills=[], diagnostics=diagnostics)
# Parse disable-model-invocation flag
disable_model_invocation = parse_boolean_value(
get_frontmatter_value(frontmatter, 'disable-model-invocation'),
default=False
)
# Create skill object
skill = Skill(
name=name,
description=description,
file_path=file_path,
base_dir=skill_dir,
source=source,
content=content,
disable_model_invocation=disable_model_invocation,
frontmatter=frontmatter,
)
return LoadSkillsResult(skills=[skill], diagnostics=diagnostics)
def _load_linkai_agent_description(self, skill_dir: str, default_description: str) -> str:
import json
config_path = os.path.join(skill_dir, "config.json")
# Without config.json, skip this skill entirely (return empty to trigger exclusion)
if not os.path.exists(config_path):
logger.debug(f"[SkillLoader] linkai-agent skipped: no config.json found")
return ""
try:
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
apps = config.get("apps", [])
if not apps:
return default_description
# Build dynamic description with app details
app_descriptions = "; ".join([
f"{app['app_name']}({app['app_code']}: {app['app_description']})"
for app in apps
])
return f"Call LinkAI apps/workflows. {app_descriptions}"
except Exception as e:
logger.warning(f"[SkillLoader] Failed to load linkai-agent config: {e}")
return default_description
def load_all_skills(
self,
builtin_dir: Optional[str] = None,
custom_dir: Optional[str] = None,
) -> Dict[str, SkillEntry]:
skill_map: Dict[str, SkillEntry] = {}
all_diagnostics = []
# Load builtin skills (lower precedence)
if builtin_dir and os.path.exists(builtin_dir):
result = self.load_skills_from_dir(builtin_dir, source='builtin')
all_diagnostics.extend(result.diagnostics)
for skill in result.skills:
entry = self._create_skill_entry(skill)
skill_map[skill.name] = entry
# Load custom skills (higher precedence, overrides builtin)
if custom_dir and os.path.exists(custom_dir):
result = self.load_skills_from_dir(custom_dir, source='custom')
all_diagnostics.extend(result.diagnostics)
for skill in result.skills:
entry = self._create_skill_entry(skill)
skill_map[skill.name] = entry
# Log diagnostics
if all_diagnostics:
logger.debug(f"Skill loading diagnostics: {len(all_diagnostics)} issues")
for diag in all_diagnostics[:5]:
logger.debug(f" - {diag}")
logger.debug(f"Loaded {len(skill_map)} skills total")
return skill_map
def _create_skill_entry(self, skill: Skill) -> SkillEntry:
metadata = parse_metadata(skill.frontmatter)
# Parse user-invocable flag
user_invocable = parse_boolean_value(
get_frontmatter_value(skill.frontmatter, 'user-invocable'),
default=True
)
return SkillEntry(
skill=skill,
metadata=metadata,
user_invocable=user_invocable,
) | --- +++ @@ -1,3 +1,6 @@+"""
+Skill loader for discovering and loading skills from directories.
+"""
import os
from pathlib import Path
@@ -8,11 +11,23 @@
class SkillLoader:
+ """Loads skills from various directories."""
def __init__(self):
pass
def load_skills_from_dir(self, dir_path: str, source: str) -> LoadSkillsResult:
+ """
+ Load skills from a directory.
+
+ Discovery rules:
+ - Direct .md files in the root directory
+ - Recursive SKILL.md files under subdirectories
+
+ :param dir_path: Directory path to scan
+ :param source: Source identifier ('builtin' or 'custom')
+ :return: LoadSkillsResult with skills and diagnostics
+ """
skills = []
diagnostics = []
@@ -35,6 +50,14 @@ source: str,
include_root_files: bool = False
) -> LoadSkillsResult:
+ """
+ Recursively load skills from a directory.
+
+ :param dir_path: Directory to scan
+ :param source: Source identifier
+ :param include_root_files: Whether to include root-level .md files
+ :return: LoadSkillsResult
+ """
skills = []
diagnostics = []
@@ -83,6 +106,13 @@ return LoadSkillsResult(skills=skills, diagnostics=diagnostics)
def _load_skill_from_file(self, file_path: str, source: str) -> LoadSkillsResult:
+ """
+ Load a single skill from a markdown file.
+
+ :param file_path: Path to the skill markdown file
+ :param source: Source identifier
+ :return: LoadSkillsResult
+ """
diagnostics = []
try:
@@ -143,6 +173,13 @@ return LoadSkillsResult(skills=[skill], diagnostics=diagnostics)
def _load_linkai_agent_description(self, skill_dir: str, default_description: str) -> str:
+ """
+ Dynamically load LinkAI agent description from config.json
+
+ :param skill_dir: Skill directory
+ :param default_description: Default description from SKILL.md
+ :return: Dynamic description with app list
+ """
import json
config_path = os.path.join(skill_dir, "config.json")
@@ -177,6 +214,19 @@ builtin_dir: Optional[str] = None,
custom_dir: Optional[str] = None,
) -> Dict[str, SkillEntry]:
+ """
+ Load skills from builtin and custom directories.
+
+ Precedence (lowest to highest):
+ 1. builtin — project root ``skills/``, shipped with the codebase
+ 2. custom — workspace ``skills/``, installed via cloud console or skill creator
+
+ Same-name custom skills override builtin ones.
+
+ :param builtin_dir: Built-in skills directory
+ :param custom_dir: Custom skills directory
+ :return: Dictionary mapping skill name to SkillEntry
+ """
skill_map: Dict[str, SkillEntry] = {}
all_diagnostics = []
@@ -207,6 +257,12 @@ return skill_map
def _create_skill_entry(self, skill: Skill) -> SkillEntry:
+ """
+ Create a SkillEntry from a Skill with parsed metadata.
+
+ :param skill: The skill to create an entry for
+ :return: SkillEntry with metadata
+ """
metadata = parse_metadata(skill.frontmatter)
# Parse user-invocable flag
@@ -219,4 +275,4 @@ skill=skill,
metadata=metadata,
user_invocable=user_invocable,
- )+ )
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/skills/loader.py |
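A hedged usage sketch of the loader documented above: the import assumes the repository root is on PYTHONPATH, and both directory paths are illustrative placeholders rather than the project's actual defaults.
import os
from agent.skills.loader import SkillLoader

loader = SkillLoader()
skill_map = loader.load_all_skills(
    builtin_dir="skills",                           # lower precedence, ships with the codebase
    custom_dir=os.path.expanduser("~/cow/skills"),  # higher precedence, same-name skills override builtin
)
for name, entry in skill_map.items():
    # entry.skill.source is 'custom' when a custom skill shadowed a builtin one
    print(name, entry.skill.source, entry.user_invocable)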
Annotate my code with docstrings |
import hashlib
from abc import ABC, abstractmethod
from typing import List, Optional
class EmbeddingProvider(ABC):
@abstractmethod
def embed(self, text: str) -> List[float]:
pass
@abstractmethod
def embed_batch(self, texts: List[str]) -> List[List[float]]:
pass
@property
@abstractmethod
def dimensions(self) -> int:
pass
class OpenAIEmbeddingProvider(EmbeddingProvider):
def __init__(self, model: str = "text-embedding-3-small", api_key: Optional[str] = None, api_base: Optional[str] = None):
self.model = model
self.api_key = api_key
self.api_base = api_base or "https://api.openai.com/v1"
# Validate API key
if not self.api_key or self.api_key in ["", "YOUR API KEY", "YOUR_API_KEY"]:
raise ValueError("OpenAI API key is not configured. Please set 'open_ai_api_key' in config.json")
# Set dimensions based on model
self._dimensions = 1536 if "small" in model else 3072
def _call_api(self, input_data):
import requests
url = f"{self.api_base}/embeddings"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}"
}
data = {
"input": input_data,
"model": self.model
}
try:
response = requests.post(url, headers=headers, json=data, timeout=5)
response.raise_for_status()
return response.json()
except requests.exceptions.ConnectionError as e:
raise ConnectionError(f"Failed to connect to OpenAI API at {url}. Please check your network connection and api_base configuration. Error: {str(e)}")
except requests.exceptions.Timeout as e:
raise TimeoutError(f"OpenAI API request timed out after 10s. Please check your network connection. Error: {str(e)}")
except requests.exceptions.HTTPError as e:
if e.response.status_code == 401:
raise ValueError(f"Invalid OpenAI API key. Please check your 'open_ai_api_key' in config.json")
elif e.response.status_code == 429:
raise ValueError(f"OpenAI API rate limit exceeded. Please try again later.")
else:
raise ValueError(f"OpenAI API request failed: {e.response.status_code} - {e.response.text}")
def embed(self, text: str) -> List[float]:
result = self._call_api(text)
return result["data"][0]["embedding"]
def embed_batch(self, texts: List[str]) -> List[List[float]]:
if not texts:
return []
result = self._call_api(texts)
return [item["embedding"] for item in result["data"]]
@property
def dimensions(self) -> int:
return self._dimensions
# LocalEmbeddingProvider removed - only use OpenAI embedding or keyword search
class EmbeddingCache:
def __init__(self):
self.cache = {}
def get(self, text: str, provider: str, model: str) -> Optional[List[float]]:
key = self._compute_key(text, provider, model)
return self.cache.get(key)
def put(self, text: str, provider: str, model: str, embedding: List[float]):
key = self._compute_key(text, provider, model)
self.cache[key] = embedding
@staticmethod
def _compute_key(text: str, provider: str, model: str) -> str:
content = f"{provider}:{model}:{text}"
return hashlib.md5(content.encode('utf-8')).hexdigest()
def clear(self):
self.cache.clear()
def create_embedding_provider(
provider: str = "openai",
model: Optional[str] = None,
api_key: Optional[str] = None,
api_base: Optional[str] = None
) -> EmbeddingProvider:
if provider not in ("openai", "linkai"):
raise ValueError(f"Unsupported embedding provider: {provider}. Use 'openai' or 'linkai'.")
model = model or "text-embedding-3-small"
return OpenAIEmbeddingProvider(model=model, api_key=api_key, api_base=api_base) | --- +++ @@ -1,3 +1,8 @@+"""
+Embedding providers for memory
+
+Supports OpenAI-compatible embedding APIs (OpenAI and LinkAI)
+"""
import hashlib
from abc import ABC, abstractmethod
@@ -5,24 +10,37 @@
class EmbeddingProvider(ABC):
+ """Base class for embedding providers"""
@abstractmethod
def embed(self, text: str) -> List[float]:
+ """Generate embedding for text"""
pass
@abstractmethod
def embed_batch(self, texts: List[str]) -> List[List[float]]:
+ """Generate embeddings for multiple texts"""
pass
@property
@abstractmethod
def dimensions(self) -> int:
+ """Get embedding dimensions"""
pass
class OpenAIEmbeddingProvider(EmbeddingProvider):
+ """OpenAI embedding provider using REST API"""
def __init__(self, model: str = "text-embedding-3-small", api_key: Optional[str] = None, api_base: Optional[str] = None):
+ """
+ Initialize OpenAI embedding provider
+
+ Args:
+ model: Model name (text-embedding-3-small or text-embedding-3-large)
+ api_key: OpenAI API key
+ api_base: Optional API base URL
+ """
self.model = model
self.api_key = api_key
self.api_base = api_base or "https://api.openai.com/v1"
@@ -35,6 +53,7 @@ self._dimensions = 1536 if "small" in model else 3072
def _call_api(self, input_data):
+ """Call OpenAI embedding API using requests"""
import requests
url = f"{self.api_base}/embeddings"
@@ -64,10 +83,12 @@ raise ValueError(f"OpenAI API request failed: {e.response.status_code} - {e.response.text}")
def embed(self, text: str) -> List[float]:
+ """Generate embedding for text"""
result = self._call_api(text)
return result["data"][0]["embedding"]
def embed_batch(self, texts: List[str]) -> List[List[float]]:
+ """Generate embeddings for multiple texts"""
if not texts:
return []
@@ -83,24 +104,29 @@
class EmbeddingCache:
+ """Cache for embeddings to avoid recomputation"""
def __init__(self):
self.cache = {}
def get(self, text: str, provider: str, model: str) -> Optional[List[float]]:
+ """Get cached embedding"""
key = self._compute_key(text, provider, model)
return self.cache.get(key)
def put(self, text: str, provider: str, model: str, embedding: List[float]):
+ """Cache embedding"""
key = self._compute_key(text, provider, model)
self.cache[key] = embedding
@staticmethod
def _compute_key(text: str, provider: str, model: str) -> str:
+ """Compute cache key"""
content = f"{provider}:{model}:{text}"
return hashlib.md5(content.encode('utf-8')).hexdigest()
def clear(self):
+ """Clear cache"""
self.cache.clear()
@@ -110,8 +136,26 @@ api_key: Optional[str] = None,
api_base: Optional[str] = None
) -> EmbeddingProvider:
+ """
+ Factory function to create embedding provider
+
+ Supports "openai" and "linkai" providers (both use OpenAI-compatible REST API).
+ If initialization fails, caller should fall back to keyword-only search.
+
+ Args:
+ provider: Provider name ("openai" or "linkai")
+ model: Model name (default: text-embedding-3-small)
+ api_key: API key (required)
+ api_base: API base URL
+
+ Returns:
+ EmbeddingProvider instance
+
+ Raises:
+ ValueError: If provider is unsupported or api_key is missing
+ """
if provider not in ("openai", "linkai"):
raise ValueError(f"Unsupported embedding provider: {provider}. Use 'openai' or 'linkai'.")
model = model or "text-embedding-3-small"
- return OpenAIEmbeddingProvider(model=model, api_key=api_key, api_base=api_base)+ return OpenAIEmbeddingProvider(model=model, api_key=api_key, api_base=api_base)
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/memory/embedding.py |
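To show how the provider and cache above fit together, here is a hedged sketch; it assumes the repo root is on PYTHONPATH and that an OPENAI_API_KEY environment variable holds a valid key (the env var name and the query text are illustrative).
import os
from agent.memory.embedding import create_embedding_provider, EmbeddingCache

provider = create_embedding_provider("openai", api_key=os.environ["OPENAI_API_KEY"])
cache = EmbeddingCache()

text = "how to configure the agent workspace"
vector = cache.get(text, "openai", provider.model)
if vector is None:
    vector = provider.embed(text)                       # network call to the embeddings API
    cache.put(text, "openai", provider.model, vector)   # cached so repeat lookups skip the API
print(len(vector))  # 1536 for text-embedding-3-small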
Generate documentation strings for clarity | import time
import logging
logger = logging.getLogger(__name__)
class FileCache:
def __init__(self, ttl=120):
self.cache = {}
self.ttl = ttl
def add(self, session_id: str, file_path: str, file_type: str = "image"):
if session_id not in self.cache:
self.cache[session_id] = {
'files': [],
'timestamp': time.time()
}
# Add the file (deduplicated)
file_info = {'path': file_path, 'type': file_type}
if file_info not in self.cache[session_id]['files']:
self.cache[session_id]['files'].append(file_info)
logger.info(f"[FileCache] Added {file_type} to cache for session {session_id}: {file_path}")
def get(self, session_id: str) -> list:
if session_id not in self.cache:
return []
item = self.cache[session_id]
# Check whether the entry has expired
if time.time() - item['timestamp'] > self.ttl:
logger.info(f"[FileCache] Cache expired for session {session_id}, clearing...")
del self.cache[session_id]
return []
return item['files']
def clear(self, session_id: str):
if session_id in self.cache:
logger.info(f"[FileCache] Cleared cache for session {session_id}")
del self.cache[session_id]
def cleanup_expired(self):
current_time = time.time()
expired_sessions = []
for session_id, item in self.cache.items():
if current_time - item['timestamp'] > self.ttl:
expired_sessions.append(session_id)
for session_id in expired_sessions:
del self.cache[session_id]
logger.debug(f"[FileCache] Cleaned up expired cache for session {session_id}")
if expired_sessions:
logger.info(f"[FileCache] Cleaned up {len(expired_sessions)} expired cache(s)")
# Global singleton
_file_cache = FileCache()
def get_file_cache() -> FileCache:
return _file_cache | --- +++ @@ -1,3 +1,7 @@+"""
+File cache manager
+Caches file messages that were sent on their own (images, videos, documents, etc.) and attaches them automatically when the user asks a question
+"""
import time
import logging
@@ -5,12 +9,25 @@
class FileCache:
+ """文件缓存管理器,按 session_id 缓存文件,TTL=2分钟"""
def __init__(self, ttl=120):
+ """
+ Args:
+ ttl: Cache expiry time in seconds, default 2 minutes
+ """
self.cache = {}
self.ttl = ttl
def add(self, session_id: str, file_path: str, file_type: str = "image"):
+ """
+ Add a file to the cache
+
+ Args:
+ session_id: Session ID
+ file_path: Local file path
+ file_type: File type (image, video, file, etc.)
+ """
if session_id not in self.cache:
self.cache[session_id] = {
'files': [],
@@ -24,6 +41,15 @@ logger.info(f"[FileCache] Added {file_type} to cache for session {session_id}: {file_path}")
def get(self, session_id: str) -> list:
+ """
+ Get the list of cached files
+
+ Args:
+ session_id: Session ID
+
+ Returns:
+ List of file info dicts [{'path': '...', 'type': 'image'}, ...]; empty list if nothing is cached or the entry expired
+ """
if session_id not in self.cache:
return []
@@ -38,11 +64,18 @@ return item['files']
def clear(self, session_id: str):
+ """
+ Clear the cache for the given session
+
+ Args:
+ session_id: Session ID
+ """
if session_id in self.cache:
logger.info(f"[FileCache] Cleared cache for session {session_id}")
del self.cache[session_id]
def cleanup_expired(self):
+ """清理所有过期的缓存"""
current_time = time.time()
expired_sessions = []
@@ -63,4 +96,5 @@
def get_file_cache() -> FileCache:
- return _file_cache+ """获取全局文件缓存实例"""
+ return _file_cache
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/channel/file_cache.py |
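A small sketch of the TTL behaviour documented above; the session id and file path are made up, and the short TTL is only there so the expiry shows up quickly (the import assumes the repo root is on PYTHONPATH).
import time
from channel.file_cache import FileCache

cache = FileCache(ttl=2)                        # 2-second TTL just for the demo
cache.add("session-42", "/tmp/photo.jpg", "image")
print(cache.get("session-42"))                  # [{'path': '/tmp/photo.jpg', 'type': 'image'}]

time.sleep(3)                                   # let the entry expire
print(cache.get("session-42"))                  # [] - expired entries are dropped on read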
Generate docstrings for script automation |
import re
import json
from typing import Dict, Any, Optional, List
from agent.skills.types import SkillMetadata, SkillInstallSpec
def parse_frontmatter(content: str) -> Dict[str, Any]:
frontmatter = {}
# Match frontmatter block between --- markers
match = re.match(r'^---\s*\n(.*?)\n---\s*\n', content, re.DOTALL)
if not match:
return frontmatter
frontmatter_text = match.group(1)
# Try to use PyYAML for proper YAML parsing
try:
import yaml
frontmatter = yaml.safe_load(frontmatter_text)
if not isinstance(frontmatter, dict):
frontmatter = {}
return frontmatter
except ImportError:
# Fallback to simple parsing if PyYAML not available
pass
except Exception:
# If YAML parsing fails, fall back to simple parsing
pass
# Simple YAML-like parsing (supports key: value format only)
# This is a fallback for when PyYAML is not available
for line in frontmatter_text.split('\n'):
line = line.strip()
if not line or line.startswith('#'):
continue
if ':' in line:
key, value = line.split(':', 1)
key = key.strip()
value = value.strip()
# Try to parse as JSON if it looks like JSON
if value.startswith('{') or value.startswith('['):
try:
value = json.loads(value)
except json.JSONDecodeError:
pass
# Parse boolean values
elif value.lower() in ('true', 'false'):
value = value.lower() == 'true'
# Parse numbers
elif value.isdigit():
value = int(value)
frontmatter[key] = value
return frontmatter
def parse_metadata(frontmatter: Dict[str, Any]) -> Optional[SkillMetadata]:
metadata_raw = frontmatter.get('metadata')
if not metadata_raw:
return None
# If it's a string, try to parse as JSON
if isinstance(metadata_raw, str):
try:
metadata_raw = json.loads(metadata_raw)
except json.JSONDecodeError:
return None
if not isinstance(metadata_raw, dict):
return None
# Use metadata_raw directly (COW format)
meta_obj = metadata_raw
# Parse install specs
install_specs = []
install_raw = meta_obj.get('install', [])
if isinstance(install_raw, list):
for spec_raw in install_raw:
if not isinstance(spec_raw, dict):
continue
kind = spec_raw.get('kind', spec_raw.get('type', '')).lower()
if not kind:
continue
spec = SkillInstallSpec(
kind=kind,
id=spec_raw.get('id'),
label=spec_raw.get('label'),
bins=_normalize_string_list(spec_raw.get('bins')),
os=_normalize_string_list(spec_raw.get('os')),
formula=spec_raw.get('formula'),
package=spec_raw.get('package'),
module=spec_raw.get('module'),
url=spec_raw.get('url'),
archive=spec_raw.get('archive'),
extract=spec_raw.get('extract', False),
strip_components=spec_raw.get('stripComponents'),
target_dir=spec_raw.get('targetDir'),
)
install_specs.append(spec)
# Parse requires
requires = {}
requires_raw = meta_obj.get('requires', {})
if isinstance(requires_raw, dict):
for key, value in requires_raw.items():
requires[key] = _normalize_string_list(value)
return SkillMetadata(
always=meta_obj.get('always', False),
skill_key=meta_obj.get('skillKey'),
primary_env=meta_obj.get('primaryEnv'),
emoji=meta_obj.get('emoji'),
homepage=meta_obj.get('homepage'),
os=_normalize_string_list(meta_obj.get('os')),
requires=requires,
install=install_specs,
)
def _normalize_string_list(value: Any) -> List[str]:
if not value:
return []
if isinstance(value, list):
return [str(v).strip() for v in value if v]
if isinstance(value, str):
return [v.strip() for v in value.split(',') if v.strip()]
return []
def parse_boolean_value(value: Optional[str], default: bool = False) -> bool:
if value is None:
return default
if isinstance(value, bool):
return value
if isinstance(value, str):
return value.lower() in ('true', '1', 'yes', 'on')
return default
def get_frontmatter_value(frontmatter: Dict[str, Any], key: str) -> Optional[str]:
value = frontmatter.get(key)
return str(value) if value is not None else None | --- +++ @@ -1,3 +1,6 @@+"""
+Frontmatter parsing for skills.
+"""
import re
import json
@@ -6,6 +9,11 @@
def parse_frontmatter(content: str) -> Dict[str, Any]:
+ """
+ Parse YAML-style frontmatter from markdown content.
+
+ Returns a dictionary of frontmatter fields.
+ """
frontmatter = {}
# Match frontmatter block between --- markers
@@ -60,6 +68,11 @@
def parse_metadata(frontmatter: Dict[str, Any]) -> Optional[SkillMetadata]:
+ """
+ Parse skill metadata from frontmatter.
+
+ Looks for 'metadata' field containing JSON with skill configuration.
+ """
metadata_raw = frontmatter.get('metadata')
if not metadata_raw:
return None
@@ -126,6 +139,7 @@
def _normalize_string_list(value: Any) -> List[str]:
+ """Normalize a value to a list of strings."""
if not value:
return []
@@ -139,6 +153,7 @@
def parse_boolean_value(value: Optional[str], default: bool = False) -> bool:
+ """Parse a boolean value from frontmatter."""
if value is None:
return default
@@ -152,5 +167,6 @@
def get_frontmatter_value(frontmatter: Dict[str, Any], key: str) -> Optional[str]:
+ """Get a frontmatter value as a string."""
value = frontmatter.get(key)
- return str(value) if value is not None else None+ return str(value) if value is not None else None
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/skills/frontmatter.py |
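For reference, a hedged example of the parser documented above applied to a made-up SKILL.md; the field values are illustrative and the import assumes the repo layout is on PYTHONPATH.
from agent.skills.frontmatter import parse_frontmatter, parse_boolean_value, get_frontmatter_value

content = """---
name: weather
description: Look up current weather by city
disable-model-invocation: false
---
# Weather skill body
"""

fm = parse_frontmatter(content)
print(fm["name"], fm["description"])  # weather Look up current weather by city
print(parse_boolean_value(get_frontmatter_value(fm, "disable-model-invocation"), default=False))  # False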
Create structured documentation for my script | from models.bot_factory import create_bot
from bridge.context import Context
from bridge.reply import Reply
from common import const
from common.log import logger
from common.singleton import singleton
from config import conf
from translate.factory import create_translator
from voice.factory import create_voice
@singleton
class Bridge(object):
def __init__(self):
self.btype = {
"chat": const.OPENAI,
"voice_to_text": conf().get("voice_to_text", "openai"),
"text_to_voice": conf().get("text_to_voice", "google"),
"translate": conf().get("translate", "baidu"),
}
# Read the configured model here
bot_type = conf().get("bot_type")
if bot_type:
self.btype["chat"] = bot_type
else:
model_type = conf().get("model") or const.GPT_41_MINI
# Ensure model_type is string to prevent AttributeError when using startswith()
# This handles cases where numeric model names (e.g., "1") are parsed as integers from YAML
if not isinstance(model_type, str):
logger.warning(f"[Bridge] model_type is not a string: {model_type} (type: {type(model_type).__name__}), converting to string")
model_type = str(model_type)
if model_type in ["text-davinci-003"]:
self.btype["chat"] = const.OPEN_AI
if conf().get("use_azure_chatgpt", False):
self.btype["chat"] = const.CHATGPTONAZURE
if model_type in ["wenxin", "wenxin-4"]:
self.btype["chat"] = const.BAIDU
if model_type in ["xunfei"]:
self.btype["chat"] = const.XUNFEI
if model_type in [const.QWEN]:
self.btype["chat"] = const.QWEN
if model_type in [const.QWEN_TURBO, const.QWEN_PLUS, const.QWEN_MAX]:
self.btype["chat"] = const.QWEN_DASHSCOPE
# Support Qwen3 and other DashScope models
if model_type and (model_type.startswith("qwen") or model_type.startswith("qwq") or model_type.startswith("qvq")):
self.btype["chat"] = const.QWEN_DASHSCOPE
if model_type and model_type.startswith("gemini"):
self.btype["chat"] = const.GEMINI
if model_type and model_type.startswith("glm"):
self.btype["chat"] = const.ZHIPU_AI
if model_type and model_type.startswith("claude"):
self.btype["chat"] = const.CLAUDEAPI
if model_type in [const.MOONSHOT, "moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k"]:
self.btype["chat"] = const.MOONSHOT
if model_type and model_type.startswith("kimi"):
self.btype["chat"] = const.MOONSHOT
if model_type and model_type.startswith("doubao"):
self.btype["chat"] = const.DOUBAO
if model_type in [const.MODELSCOPE]:
self.btype["chat"] = const.MODELSCOPE
# MiniMax models
if model_type and (model_type in ["abab6.5-chat", "abab6.5"] or model_type.lower().startswith("minimax")):
self.btype["chat"] = const.MiniMax
if conf().get("use_linkai") and conf().get("linkai_api_key"):
self.btype["chat"] = const.LINKAI
if not conf().get("voice_to_text") or conf().get("voice_to_text") in ["openai"]:
self.btype["voice_to_text"] = const.LINKAI
if not conf().get("text_to_voice") or conf().get("text_to_voice") in ["openai", const.TTS_1, const.TTS_1_HD]:
self.btype["text_to_voice"] = const.LINKAI
self.bots = {}
self.chat_bots = {}
self._agent_bridge = None
# Interface corresponding to each model type
def get_bot(self, typename):
if self.bots.get(typename) is None:
logger.info("create bot {} for {}".format(self.btype[typename], typename))
if typename == "text_to_voice":
self.bots[typename] = create_voice(self.btype[typename])
elif typename == "voice_to_text":
self.bots[typename] = create_voice(self.btype[typename])
elif typename == "chat":
self.bots[typename] = create_bot(self.btype[typename])
elif typename == "translate":
self.bots[typename] = create_translator(self.btype[typename])
return self.bots[typename]
def get_bot_type(self, typename):
return self.btype[typename]
def fetch_reply_content(self, query, context: Context) -> Reply:
return self.get_bot("chat").reply(query, context)
def fetch_voice_to_text(self, voiceFile) -> Reply:
return self.get_bot("voice_to_text").voiceToText(voiceFile)
def fetch_text_to_voice(self, text) -> Reply:
return self.get_bot("text_to_voice").textToVoice(text)
def fetch_translate(self, text, from_lang="", to_lang="en") -> Reply:
return self.get_bot("translate").translate(text, from_lang, to_lang)
def find_chat_bot(self, bot_type: str):
if self.chat_bots.get(bot_type) is None:
self.chat_bots[bot_type] = create_bot(bot_type)
return self.chat_bots.get(bot_type)
def reset_bot(self):
self.__init__()
def get_agent_bridge(self):
if self._agent_bridge is None:
from bridge.agent_bridge import AgentBridge
self._agent_bridge = AgentBridge(self)
return self._agent_bridge
def fetch_agent_reply(self, query: str, context: Context = None,
on_event=None, clear_history: bool = False) -> Reply:
agent_bridge = self.get_agent_bridge()
return agent_bridge.agent_reply(query, context, on_event, clear_history) | --- +++ @@ -114,9 +114,15 @@ return self.chat_bots.get(bot_type)
def reset_bot(self):
+ """
+ Reset bot routing
+ """
self.__init__()
def get_agent_bridge(self):
+ """
+ Get agent bridge for agent-based conversations
+ """
if self._agent_bridge is None:
from bridge.agent_bridge import AgentBridge
self._agent_bridge = AgentBridge(self)
@@ -124,5 +130,17 @@
def fetch_agent_reply(self, query: str, context: Context = None,
on_event=None, clear_history: bool = False) -> Reply:
+ """
+ Use super agent to handle the query
+
+ Args:
+ query: User query
+ context: Context object
+ on_event: Event callback for streaming
+ clear_history: Whether to clear conversation history
+
+ Returns:
+ Reply object
+ """
agent_bridge = self.get_agent_bridge()
- return agent_bridge.agent_reply(query, context, on_event, clear_history)+ return agent_bridge.agent_reply(query, context, on_event, clear_history)
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/bridge/bridge.py |
Add missing documentation to my Python functions |
import os
import json
from typing import Dict, Any, Optional
import requests
from agent.tools.base_tool import BaseTool, ToolResult
from common.log import logger
from config import conf
# Default timeout for API requests (seconds)
DEFAULT_TIMEOUT = 30
class WebSearch(BaseTool):
name: str = "web_search"
description: str = "Search the web for real-time information. Returns titles, URLs, and snippets."
params: dict = {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "Search query string"
},
"count": {
"type": "integer",
"description": "Number of results to return (1-50, default: 10)"
},
"freshness": {
"type": "string",
"description": (
"Time range filter. Options: "
"'noLimit' (default), 'oneDay', 'oneWeek', 'oneMonth', 'oneYear', "
"or date range like '2025-01-01..2025-02-01'"
)
},
"summary": {
"type": "boolean",
"description": "Whether to include text summary for each result (default: false)"
}
},
"required": ["query"]
}
def __init__(self, config: dict = None):
self.config = config or {}
self._backend = None # Will be resolved on first execute
@staticmethod
def is_available() -> bool:
return bool(os.environ.get("BOCHA_API_KEY") or os.environ.get("LINKAI_API_KEY"))
def _resolve_backend(self) -> Optional[str]:
if os.environ.get("BOCHA_API_KEY"):
return "bocha"
if os.environ.get("LINKAI_API_KEY"):
return "linkai"
return None
def execute(self, args: Dict[str, Any]) -> ToolResult:
query = args.get("query", "").strip()
if not query:
return ToolResult.fail("Error: 'query' parameter is required")
count = args.get("count", 10)
freshness = args.get("freshness", "noLimit")
summary = args.get("summary", False)
# Validate count
if not isinstance(count, int) or count < 1 or count > 50:
count = 10
# Resolve backend
backend = self._resolve_backend()
if not backend:
return ToolResult.fail(
"Error: No search API key configured. "
"Please set BOCHA_API_KEY or LINKAI_API_KEY using env_config tool.\n"
" - Bocha Search: https://open.bocha.cn\n"
" - LinkAI Search: https://link-ai.tech"
)
try:
if backend == "bocha":
return self._search_bocha(query, count, freshness, summary)
else:
return self._search_linkai(query, count, freshness)
except requests.Timeout:
return ToolResult.fail(f"Error: Search request timed out after {DEFAULT_TIMEOUT}s")
except requests.ConnectionError:
return ToolResult.fail("Error: Failed to connect to search API")
except Exception as e:
logger.error(f"[WebSearch] Unexpected error: {e}", exc_info=True)
return ToolResult.fail(f"Error: Search failed - {str(e)}")
def _search_bocha(self, query: str, count: int, freshness: str, summary: bool) -> ToolResult:
api_key = os.environ.get("BOCHA_API_KEY", "")
url = "https://api.bocha.cn/v1/web-search"
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
"Accept": "application/json"
}
payload = {
"query": query,
"count": count,
"freshness": freshness,
"summary": summary
}
logger.debug(f"[WebSearch] Bocha search: query='{query}', count={count}")
response = requests.post(url, headers=headers, json=payload, timeout=DEFAULT_TIMEOUT)
if response.status_code == 401:
return ToolResult.fail("Error: Invalid BOCHA_API_KEY. Please check your API key.")
if response.status_code == 403:
return ToolResult.fail("Error: Bocha API - insufficient balance. Please top up at https://open.bocha.cn")
if response.status_code == 429:
return ToolResult.fail("Error: Bocha API rate limit reached. Please try again later.")
if response.status_code != 200:
return ToolResult.fail(f"Error: Bocha API returned HTTP {response.status_code}")
data = response.json()
# Check API-level error code
api_code = data.get("code")
if api_code is not None and api_code != 200:
msg = data.get("msg") or "Unknown error"
return ToolResult.fail(f"Error: Bocha API error (code={api_code}): {msg}")
# Extract and format results
return self._format_bocha_results(data, query)
def _format_bocha_results(self, data: dict, query: str) -> ToolResult:
search_data = data.get("data", {})
web_pages = search_data.get("webPages", {})
pages = web_pages.get("value", [])
if not pages:
return ToolResult.success({
"query": query,
"backend": "bocha",
"total": 0,
"results": [],
"message": "No results found"
})
results = []
for page in pages:
result = {
"title": page.get("name", ""),
"url": page.get("url", ""),
"snippet": page.get("snippet", ""),
"siteName": page.get("siteName", ""),
"datePublished": page.get("datePublished") or page.get("dateLastCrawled", ""),
}
# Include summary only if present
if page.get("summary"):
result["summary"] = page["summary"]
results.append(result)
total = web_pages.get("totalEstimatedMatches", len(results))
return ToolResult.success({
"query": query,
"backend": "bocha",
"total": total,
"count": len(results),
"results": results
})
def _search_linkai(self, query: str, count: int, freshness: str) -> ToolResult:
api_key = os.environ.get("LINKAI_API_KEY", "")
api_base = conf().get("linkai_api_base", "https://api.link-ai.tech")
url = f"{api_base.rstrip('/')}/v1/plugin/execute"
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {api_key}"
}
payload = {
"code": "web-search",
"args": {
"query": query,
"count": count,
"freshness": freshness
}
}
logger.debug(f"[WebSearch] LinkAI search: query='{query}', count={count}")
response = requests.post(url, headers=headers, json=payload, timeout=DEFAULT_TIMEOUT)
if response.status_code == 401:
return ToolResult.fail("Error: Invalid LINKAI_API_KEY. Please check your API key.")
if response.status_code != 200:
return ToolResult.fail(f"Error: LinkAI API returned HTTP {response.status_code}")
data = response.json()
if not data.get("success"):
msg = data.get("message") or "Unknown error"
return ToolResult.fail(f"Error: LinkAI search failed: {msg}")
return self._format_linkai_results(data, query)
def _format_linkai_results(self, data: dict, query: str) -> ToolResult:
raw_data = data.get("data", "")
# LinkAI may return data as a JSON string
if isinstance(raw_data, str):
try:
raw_data = json.loads(raw_data)
except (json.JSONDecodeError, TypeError):
# If data is plain text, return it as a single result
return ToolResult.success({
"query": query,
"backend": "linkai",
"total": 1,
"count": 1,
"results": [{"content": raw_data}]
})
# If the response follows Bing-compatible structure
if isinstance(raw_data, dict):
web_pages = raw_data.get("webPages", {})
pages = web_pages.get("value", [])
if pages:
results = []
for page in pages:
result = {
"title": page.get("name", ""),
"url": page.get("url", ""),
"snippet": page.get("snippet", ""),
"siteName": page.get("siteName", ""),
"datePublished": page.get("datePublished") or page.get("dateLastCrawled", ""),
}
if page.get("summary"):
result["summary"] = page["summary"]
results.append(result)
total = web_pages.get("totalEstimatedMatches", len(results))
return ToolResult.success({
"query": query,
"backend": "linkai",
"total": total,
"count": len(results),
"results": results
})
# Fallback: return raw data
return ToolResult.success({
"query": query,
"backend": "linkai",
"total": 1,
"count": 1,
"results": [{"content": str(raw_data)}]
}) | --- +++ @@ -1,3 +1,9 @@+"""
+Web Search tool - Search the web using Bocha or LinkAI search API.
+Supports two backends with unified response format:
+ 1. Bocha Search (primary, requires BOCHA_API_KEY)
+ 2. LinkAI Search (fallback, requires LINKAI_API_KEY)
+"""
import os
import json
@@ -15,6 +21,7 @@
class WebSearch(BaseTool):
+ """Tool for searching the web using Bocha or LinkAI search API"""
name: str = "web_search"
description: str = "Search the web for real-time information. Returns titles, URLs, and snippets."
@@ -52,9 +59,16 @@
@staticmethod
def is_available() -> bool:
+ """Check if web search is available (at least one API key is configured)"""
return bool(os.environ.get("BOCHA_API_KEY") or os.environ.get("LINKAI_API_KEY"))
def _resolve_backend(self) -> Optional[str]:
+ """
+ Determine which search backend to use.
+ Priority: Bocha > LinkAI
+
+ :return: 'bocha', 'linkai', or None
+ """
if os.environ.get("BOCHA_API_KEY"):
return "bocha"
if os.environ.get("LINKAI_API_KEY"):
@@ -62,6 +76,12 @@ return None
def execute(self, args: Dict[str, Any]) -> ToolResult:
+ """
+ Execute web search
+
+ :param args: Search parameters (query, count, freshness, summary)
+ :return: Search results
+ """
query = args.get("query", "").strip()
if not query:
return ToolResult.fail("Error: 'query' parameter is required")
@@ -98,6 +118,15 @@ return ToolResult.fail(f"Error: Search failed - {str(e)}")
def _search_bocha(self, query: str, count: int, freshness: str, summary: bool) -> ToolResult:
+ """
+ Search using Bocha API
+
+ :param query: Search query
+ :param count: Number of results
+ :param freshness: Time range filter
+ :param summary: Whether to include summary
+ :return: Formatted search results
+ """
api_key = os.environ.get("BOCHA_API_KEY", "")
url = "https://api.bocha.cn/v1/web-search"
@@ -139,6 +168,13 @@ return self._format_bocha_results(data, query)
def _format_bocha_results(self, data: dict, query: str) -> ToolResult:
+ """
+ Format Bocha API response into unified result structure
+
+ :param data: Raw API response
+ :param query: Original query
+ :return: Formatted ToolResult
+ """
search_data = data.get("data", {})
web_pages = search_data.get("webPages", {})
pages = web_pages.get("value", [])
@@ -177,6 +213,14 @@ })
def _search_linkai(self, query: str, count: int, freshness: str) -> ToolResult:
+ """
+ Search using LinkAI plugin API
+
+ :param query: Search query
+ :param count: Number of results
+ :param freshness: Time range filter
+ :return: Formatted search results
+ """
api_key = os.environ.get("LINKAI_API_KEY", "")
api_base = conf().get("linkai_api_base", "https://api.link-ai.tech")
url = f"{api_base.rstrip('/')}/v1/plugin/execute"
@@ -213,6 +257,15 @@ return self._format_linkai_results(data, query)
def _format_linkai_results(self, data: dict, query: str) -> ToolResult:
+ """
+ Format LinkAI API response into unified result structure.
+ LinkAI returns the search data in data.data field, which follows
+ the same Bing-compatible format as Bocha.
+
+ :param data: Raw API response
+ :param query: Original query
+ :return: Formatted ToolResult
+ """
raw_data = data.get("data", "")
# LinkAI may return data as a JSON string
@@ -264,4 +317,4 @@ "total": 1,
"count": 1,
"results": [{"content": str(raw_data)}]
- })+ })
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/tools/web_search/web_search.py |
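A usage sketch for the search tool above. It only runs with a real key; BOCHA_API_KEY / LINKAI_API_KEY are the same env vars the tool itself reads, and the query is illustrative.
from agent.tools.web_search.web_search import WebSearch

if WebSearch.is_available():
    tool = WebSearch()
    result = tool.execute({"query": "latest python release", "count": 5, "freshness": "oneMonth"})
    print(result)   # ToolResult with titles, URLs and snippets
else:
    print("Set BOCHA_API_KEY or LINKAI_API_KEY (e.g. via the env_config tool) before using web_search")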
Fill in missing docstrings in my code | import json
import time
from typing import List, Dict, Any, Optional, Callable, Tuple
from agent.protocol.models import LLMRequest, LLMModel
from agent.protocol.message_utils import sanitize_claude_messages, compress_turn_to_text_only
from agent.tools.base_tool import BaseTool, ToolResult
from common.log import logger
class AgentStreamExecutor:
def __init__(
self,
agent, # Agent instance
model: LLMModel,
system_prompt: str,
tools: List[BaseTool],
max_turns: int = 50,
on_event: Optional[Callable] = None,
messages: Optional[List[Dict]] = None,
max_context_turns: int = 30
):
self.agent = agent
self.model = model
self.system_prompt = system_prompt
# Convert tools list to dict
self.tools = {tool.name: tool for tool in tools} if isinstance(tools, list) else tools
self.max_turns = max_turns
self.on_event = on_event
self.max_context_turns = max_context_turns
# Message history - use provided messages or create new list
self.messages = messages if messages is not None else []
# Tool failure tracking for retry protection
self.tool_failure_history = [] # List of (tool_name, args_hash, success) tuples
# Track files to send (populated by read tool)
self.files_to_send = [] # List of file metadata dicts
def _emit_event(self, event_type: str, data: dict = None):
if self.on_event:
try:
self.on_event({
"type": event_type,
"timestamp": time.time(),
"data": data or {}
})
except Exception as e:
logger.error(f"Event callback error: {e}")
def _filter_think_tags(self, text: str) -> str:
if not text:
return text
import re
# Remove only the <think> and </think> tags, keep the content
text = re.sub(r'<think>', '', text)
text = re.sub(r'</think>', '', text)
return text
def _hash_args(self, args: dict) -> str:
import hashlib
# Sort keys for consistent hashing
args_str = json.dumps(args, sort_keys=True, ensure_ascii=False)
return hashlib.md5(args_str.encode()).hexdigest()[:8]
def _check_consecutive_failures(self, tool_name: str, args: dict) -> Tuple[bool, str, bool]:
args_hash = self._hash_args(args)
# Count consecutive calls (both success and failure) for same tool + args
# This catches infinite loops where tool succeeds but LLM keeps calling it
same_args_calls = 0
for name, ahash, success in reversed(self.tool_failure_history):
if name == tool_name and ahash == args_hash:
same_args_calls += 1
else:
break # Different tool or args, stop counting
# Stop at 5 consecutive calls with same args (whether success or failure)
if same_args_calls >= 5:
return True, f"工具 '{tool_name}' 使用相同参数已被调用 {same_args_calls} 次,停止执行以防止无限循环。如果需要查看配置,结果已在之前的调用中返回。", False
# Count consecutive failures for same tool + args
same_args_failures = 0
for name, ahash, success in reversed(self.tool_failure_history):
if name == tool_name and ahash == args_hash:
if not success:
same_args_failures += 1
else:
break # Stop at first success
else:
break # Different tool or args, stop counting
if same_args_failures >= 3:
return True, f"工具 '{tool_name}' 使用相同参数连续失败 {same_args_failures} 次,停止执行以防止无限循环", False
# Count consecutive failures for same tool (any args)
same_tool_failures = 0
for name, ahash, success in reversed(self.tool_failure_history):
if name == tool_name:
if not success:
same_tool_failures += 1
else:
break # Stop at first success
else:
break # Different tool, stop counting
# Hard stop at 8 failures - abort with critical message
if same_tool_failures >= 8:
return True, f"抱歉,我没能完成这个任务。可能是我理解有误或者当前方法不太合适。\n\n建议你:\n• 换个方式描述需求试试\n• 把任务拆分成更小的步骤\n• 或者换个思路来解决", True
# Warning at 6 failures
if same_tool_failures >= 6:
return True, f"工具 '{tool_name}' 连续失败 {same_tool_failures} 次(使用不同参数),停止执行以防止无限循环", False
return False, "", False
def _record_tool_result(self, tool_name: str, args: dict, success: bool):
args_hash = self._hash_args(args)
self.tool_failure_history.append((tool_name, args_hash, success))
# Keep only last 50 records to avoid memory bloat
if len(self.tool_failure_history) > 50:
self.tool_failure_history = self.tool_failure_history[-50:]
def run_stream(self, user_message: str) -> str:
# Log user message with model info
logger.info(f"🤖 {self.model.model} | 👤 {user_message}")
# Add user message (Claude format - use content blocks for consistency)
self.messages.append({
"role": "user",
"content": [
{
"type": "text",
"text": user_message
}
]
})
# Trim context ONCE before the agent loop starts, not during tool steps.
# This ensures tool_use/tool_result chains created during the current run
# are never stripped mid-execution (which would cause LLM loops).
self._trim_messages()
# Validate after trimming: trimming may leave orphaned tool_use at the
# boundary (e.g. the last kept turn ends with an assistant tool_use whose
# tool_result was in a discarded turn).
self._validate_and_fix_messages()
self._emit_event("agent_start")
final_response = ""
turn = 0
try:
while turn < self.max_turns:
turn += 1
logger.info(f"[Agent] 第 {turn} 轮")
self._emit_event("turn_start", {"turn": turn})
# Call LLM (enable retry_on_empty for better reliability)
assistant_msg, tool_calls = self._call_llm_stream(retry_on_empty=True)
final_response = assistant_msg
# No tool calls, end loop
if not tool_calls:
# Check whether an empty response was returned
if not assistant_msg:
logger.warning(f"[Agent] LLM returned empty response after retry (no content and no tool calls)")
logger.info(f"[Agent] This usually happens when LLM thinks the task is complete after tool execution")
# If there were earlier tool calls, force the LLM to produce a text reply
if turn > 1:
logger.info(f"[Agent] Requesting explicit response from LLM...")
# Add a message that explicitly asks it to answer the user
self.messages.append({
"role": "user",
"content": [{
"type": "text",
"text": "请向用户说明刚才工具执行的结果或回答用户的问题。"
}]
})
# Call the LLM one more time
assistant_msg, tool_calls = self._call_llm_stream(retry_on_empty=False)
final_response = assistant_msg
# Fall back only if the response is still empty
if not assistant_msg and not tool_calls:
logger.warning(f"[Agent] Still empty after explicit request")
final_response = (
"抱歉,我暂时无法生成回复。请尝试换一种方式描述你的需求,或稍后再试。"
)
logger.info(f"Generated fallback response for empty LLM output")
else:
# Empty reply on the very first turn, fall back directly
final_response = (
"抱歉,我暂时无法生成回复。请尝试换一种方式描述你的需求,或稍后再试。"
)
logger.info(f"Generated fallback response for empty LLM output")
else:
logger.info(f"💭 {assistant_msg[:150]}{'...' if len(assistant_msg) > 150 else ''}")
logger.debug(f"✅ 完成 (无工具调用)")
self._emit_event("turn_end", {
"turn": turn,
"has_tool_calls": False
})
break
# Log tool calls with arguments
tool_calls_str = []
for tc in tool_calls:
# Safely handle None or missing arguments
args = tc.get('arguments') or {}
if isinstance(args, dict):
args_str = ', '.join([f"{k}={v}" for k, v in args.items()])
if args_str:
tool_calls_str.append(f"{tc['name']}({args_str})")
else:
tool_calls_str.append(tc['name'])
else:
tool_calls_str.append(tc['name'])
logger.info(f"🔧 {', '.join(tool_calls_str)}")
# Execute tools
tool_results = []
tool_result_blocks = []
try:
for tool_call in tool_calls:
result = self._execute_tool(tool_call)
tool_results.append(result)
# Debug: Check if tool is being called repeatedly with same args
if turn > 2:
# Check last N tool calls for repeats
repeat_count = sum(
1 for name, ahash, _ in self.tool_failure_history[-10:]
if name == tool_call["name"] and ahash == self._hash_args(tool_call["arguments"])
)
if repeat_count >= 3:
logger.warning(
f"⚠️ Tool '{tool_call['name']}' has been called {repeat_count} times "
f"with same arguments. This may indicate a loop."
)
# Check if this is a file to send (from read tool)
if result.get("status") == "success" and isinstance(result.get("result"), dict):
result_data = result.get("result")
if result_data.get("type") == "file_to_send":
# Store file metadata for later sending
self.files_to_send.append(result_data)
logger.info(f"📎 检测到待发送文件: {result_data.get('file_name', result_data.get('path'))}")
# Check for critical error - abort entire conversation
if result.get("status") == "critical_error":
logger.error(f"💥 检测到严重错误,终止对话")
final_response = result.get('result', '任务执行失败')
return final_response
# Log tool result in compact format
status_emoji = "✅" if result.get("status") == "success" else "❌"
result_data = result.get('result', '')
# Format result string with proper Chinese character support
if isinstance(result_data, (dict, list)):
result_str = json.dumps(result_data, ensure_ascii=False)
else:
result_str = str(result_data)
logger.info(f" {status_emoji} {tool_call['name']} ({result.get('execution_time', 0):.2f}s): {result_str[:200]}{'...' if len(result_str) > 200 else ''}")
# Build tool result block (Claude format)
# Format content in a way that's easy for LLM to understand
is_error = result.get("status") == "error"
if is_error:
# For errors, provide clear error message
result_content = f"Error: {result.get('result', 'Unknown error')}"
elif isinstance(result.get('result'), dict):
# For dict results, use JSON format
result_content = json.dumps(result.get('result'), ensure_ascii=False)
elif isinstance(result.get('result'), str):
# For string results, use directly
result_content = result.get('result')
else:
# Fallback to full JSON
result_content = json.dumps(result, ensure_ascii=False)
# Truncate excessively large tool results for the current turn
# Historical turns will be further truncated in _trim_messages()
MAX_CURRENT_TURN_RESULT_CHARS = 50000
if len(result_content) > MAX_CURRENT_TURN_RESULT_CHARS:
truncated_len = len(result_content)
result_content = result_content[:MAX_CURRENT_TURN_RESULT_CHARS] + \
f"\n\n[Output truncated: {truncated_len} chars total, showing first {MAX_CURRENT_TURN_RESULT_CHARS} chars]"
logger.info(f"📎 Truncated tool result for '{tool_call['name']}': {truncated_len} -> {MAX_CURRENT_TURN_RESULT_CHARS} chars")
tool_result_block = {
"type": "tool_result",
"tool_use_id": tool_call["id"],
"content": result_content
}
# Add is_error field for Claude API (helps model understand failures)
if is_error:
tool_result_block["is_error"] = True
tool_result_blocks.append(tool_result_block)
finally:
# CRITICAL: Always add tool_result to maintain message history integrity
# Even if tool execution fails, we must add error results to match tool_use
if tool_result_blocks:
# Add tool results to message history as user message (Claude format)
self.messages.append({
"role": "user",
"content": tool_result_blocks
})
# Detect potential infinite loop: same tool called multiple times with success
# If detected, add a hint to LLM to stop calling tools and provide response
if turn >= 3 and len(tool_calls) > 0:
tool_name = tool_calls[0]["name"]
args_hash = self._hash_args(tool_calls[0]["arguments"])
# Count recent successful calls with same tool+args
recent_success_count = 0
for name, ahash, success in reversed(self.tool_failure_history[-10:]):
if name == tool_name and ahash == args_hash and success:
recent_success_count += 1
# If tool was called successfully 3+ times with same args, add hint to stop loop
if recent_success_count >= 3:
logger.warning(
f"⚠️ Detected potential loop: '{tool_name}' called {recent_success_count} times "
f"with same args. Adding hint to LLM to provide final response."
)
# Add a gentle hint message to guide LLM to respond
self.messages.append({
"role": "user",
"content": [{
"type": "text",
"text": "工具已成功执行并返回结果。请基于这些信息向用户做出回复,不要重复调用相同的工具。"
}]
})
elif tool_calls:
# If we have tool_calls but no tool_result_blocks (unexpected error),
# create error results for all tool calls to maintain message integrity
logger.warning("⚠️ Tool execution interrupted, adding error results to maintain message history")
emergency_blocks = []
for tool_call in tool_calls:
emergency_blocks.append({
"type": "tool_result",
"tool_use_id": tool_call["id"],
"content": "Error: Tool execution was interrupted",
"is_error": True
})
self.messages.append({
"role": "user",
"content": emergency_blocks
})
self._emit_event("turn_end", {
"turn": turn,
"has_tool_calls": True,
"tool_count": len(tool_calls)
})
if turn >= self.max_turns:
logger.warning(f"⚠️ 已达到最大决策步数限制: {self.max_turns}")
# Force model to summarize without tool calls
logger.info(f"[Agent] Requesting summary from LLM after reaching max steps...")
# Remember position before injecting the prompt so we can remove it later
prompt_insert_idx = len(self.messages)
# Add a temporary prompt to force summary
self.messages.append({
"role": "user",
"content": [{
"type": "text",
"text": f"你已经执行了{turn}个决策步骤,达到了单次运行的最大步数限制。请总结一下你目前的执行过程和结果,告诉用户当前的进展情况。不要再调用工具,直接用文字回复。"
}]
})
# Call LLM one more time to get summary (without retry to avoid loops)
try:
summary_response, summary_tools = self._call_llm_stream(retry_on_empty=False)
if summary_response:
final_response = summary_response
logger.info(f"💭 Summary: {summary_response[:150]}{'...' if len(summary_response) > 150 else ''}")
else:
# Fallback if model still doesn't respond
final_response = (
f"我已经执行了{turn}个决策步骤,达到了单次运行的步数上限。"
"任务可能还未完全完成,建议你将任务拆分成更小的步骤,或者换一种方式描述需求。"
)
except Exception as e:
logger.warning(f"Failed to get summary from LLM: {e}")
final_response = (
f"我已经执行了{turn}个决策步骤,达到了单次运行的步数上限。"
"任务可能还未完全完成,建议你将任务拆分成更小的步骤,或者换一种方式描述需求。"
)
finally:
# Remove the injected user prompt from history to avoid polluting
# persisted conversation records. The assistant summary (if any)
# was already appended by _call_llm_stream and is kept.
if (prompt_insert_idx < len(self.messages)
and self.messages[prompt_insert_idx].get("role") == "user"):
self.messages.pop(prompt_insert_idx)
logger.debug("[Agent] Removed injected max-steps prompt from message history")
except Exception as e:
logger.error(f"❌ Agent执行错误: {e}")
self._emit_event("error", {"error": str(e)})
raise
finally:
logger.info(f"[Agent] 🏁 完成 ({turn}轮)")
self._emit_event("agent_end", {"final_response": final_response})
return final_response
def _call_llm_stream(self, retry_on_empty=True, retry_count=0, max_retries=3,
_overflow_retry: bool = False) -> Tuple[str, List[Dict]]:
# Validate and fix message history (e.g. orphaned tool_result blocks).
# Context trimming is done once in run_stream() before the loop starts,
# NOT here — trimming mid-execution would strip the current run's
# tool_use/tool_result chains and cause LLM loops.
self._validate_and_fix_messages()
# Prepare messages
messages = self._prepare_messages()
turns = self._identify_complete_turns()
logger.info(f"Sending {len(messages)} messages ({len(turns)} turns) to LLM")
# Prepare tool definitions (OpenAI/Claude format)
tools_schema = None
if self.tools:
tools_schema = []
for tool in self.tools.values():
tools_schema.append({
"name": tool.name,
"description": tool.description,
"input_schema": tool.params # Claude uses input_schema
})
# Create request
request = LLMRequest(
messages=messages,
temperature=0,
stream=True,
tools=tools_schema,
system=self.system_prompt # Pass system prompt separately for Claude API
)
self._emit_event("message_start", {"role": "assistant"})
# Streaming response
full_content = ""
tool_calls_buffer = {} # {index: {id, name, arguments}}
gemini_raw_parts = None # Preserve Gemini thoughtSignature for round-trip
stop_reason = None # Track why the stream stopped
try:
stream = self.model.call_stream(request)
for chunk in stream:
# Check for errors
if isinstance(chunk, dict) and chunk.get("error"):
# Extract error message from nested structure
error_data = chunk.get("error", {})
if isinstance(error_data, dict):
error_msg = error_data.get("message", chunk.get("message", "Unknown error"))
error_code = error_data.get("code", "")
error_type = error_data.get("type", "")
else:
error_msg = chunk.get("message", str(error_data))
error_code = ""
error_type = ""
status_code = chunk.get("status_code", "N/A")
# Log error with all available information
logger.error(f"🔴 Stream API Error:")
logger.error(f" Message: {error_msg}")
logger.error(f" Status Code: {status_code}")
logger.error(f" Error Code: {error_code}")
logger.error(f" Error Type: {error_type}")
logger.error(f" Full chunk: {chunk}")
# Check if this is a context overflow error (keyword-based, works for all models)
# Don't rely on specific status codes as different providers use different codes
error_msg_lower = error_msg.lower()
is_overflow = any(keyword in error_msg_lower for keyword in [
'context length exceeded', 'maximum context length', 'prompt is too long',
'context overflow', 'context window', 'too large', 'exceeds model context',
'request_too_large', 'request exceeds the maximum size', 'tokens exceed'
])
if is_overflow:
# Mark as context overflow for special handling
raise Exception(f"[CONTEXT_OVERFLOW] {error_msg} (Status: {status_code})")
else:
# Raise exception with full error message for retry logic
raise Exception(f"{error_msg} (Status: {status_code}, Code: {error_code}, Type: {error_type})")
# Parse chunk
if isinstance(chunk, dict) and chunk.get("choices"):
choice = chunk["choices"][0]
delta = choice.get("delta", {})
# Capture finish_reason if present
finish_reason = choice.get("finish_reason")
if finish_reason:
stop_reason = finish_reason
# Skip reasoning_content (internal thinking from models like GLM-5)
reasoning_delta = delta.get("reasoning_content") or ""
# if reasoning_delta:
# logger.debug(f"🧠 [thinking] {reasoning_delta[:100]}...")
# Handle text content
content_delta = delta.get("content") or ""
if content_delta:
# Filter out <think> tags from content
filtered_delta = self._filter_think_tags(content_delta)
full_content += filtered_delta
if filtered_delta: # Only emit if there's content after filtering
self._emit_event("message_update", {"delta": filtered_delta})
# Handle tool calls
if "tool_calls" in delta and delta["tool_calls"]:
for tc_delta in delta["tool_calls"]:
index = tc_delta.get("index", 0)
if index not in tool_calls_buffer:
tool_calls_buffer[index] = {
"id": "",
"name": "",
"arguments": ""
}
if tc_delta.get("id"):
tool_calls_buffer[index]["id"] = tc_delta["id"]
if "function" in tc_delta:
func = tc_delta["function"]
if func.get("name"):
tool_calls_buffer[index]["name"] = func["name"]
if func.get("arguments"):
tool_calls_buffer[index]["arguments"] += func["arguments"]
# Preserve _gemini_raw_parts for Gemini thoughtSignature round-trip
if "_gemini_raw_parts" in delta:
gemini_raw_parts = delta["_gemini_raw_parts"]
except Exception as e:
error_str = str(e)
error_str_lower = error_str.lower()
# Check if error is context overflow (non-retryable, needs session reset)
# Method 1: Check for special marker (set in stream error handling above)
is_context_overflow = '[context_overflow]' in error_str_lower
# Method 2: Fallback to keyword matching for non-stream errors
if not is_context_overflow:
is_context_overflow = any(keyword in error_str_lower for keyword in [
'context length exceeded', 'maximum context length', 'prompt is too long',
'context overflow', 'context window', 'too large', 'exceeds model context',
'request_too_large', 'request exceeds the maximum size'
])
# Check if error is message format error (incomplete tool_use/tool_result pairs)
# This happens when previous conversation had tool failures or context trimming
# broke tool_use/tool_result pairs.
# Note: MiniMax returns error 2013 "tool result's tool id(...) not found" for
# tool_call_id mismatches — the keywords below are intentionally broad to catch
# both standard (Claude/OpenAI) and provider-specific (MiniMax) variants.
is_message_format_error = any(keyword in error_str_lower for keyword in [
'tool_use', 'tool_result', 'tool result', 'without', 'immediately after',
'corresponding', 'must have', 'each',
'tool_call_id', 'tool id', 'is not found', 'not found', 'tool_calls',
'must be a response to a preceeding message',
'2013', # MiniMax error code for tool_call_id mismatch
]) and ('400' in error_str_lower or 'status: 400' in error_str_lower
or 'invalid_request' in error_str_lower
or 'invalidparameter' in error_str_lower)
if is_context_overflow or is_message_format_error:
error_type = "context overflow" if is_context_overflow else "message format error"
logger.error(f"💥 {error_type} detected: {e}")
# Flush memory before trimming to preserve context that will be lost
if is_context_overflow and self.agent.memory_manager:
user_id = getattr(self.agent, '_current_user_id', None)
self.agent.memory_manager.flush_memory(
messages=self.messages, user_id=user_id,
reason="overflow", max_messages=0
)
# Strategy: try aggressive trimming first, only clear as last resort
if is_context_overflow and not _overflow_retry:
trimmed = self._aggressive_trim_for_overflow()
if trimmed:
logger.warning("🔄 Aggressively trimmed context, retrying...")
return self._call_llm_stream(
retry_on_empty=retry_on_empty,
retry_count=retry_count,
max_retries=max_retries,
_overflow_retry=True
)
# Aggressive trim didn't help or this is a message format error
# -> clear everything and also purge DB to prevent reload of dirty data
logger.warning("🔄 Clearing conversation history to recover")
self.messages.clear()
self._clear_session_db()
if is_context_overflow:
raise Exception(
"抱歉,对话历史过长导致上下文溢出。我已清空历史记录,请重新描述你的需求。"
)
else:
raise Exception(
"抱歉,之前的对话出现了问题。我已清空历史记录,请重新发送你的消息。"
)
# Check if error is rate limit (429)
is_rate_limit = '429' in error_str_lower or 'rate limit' in error_str_lower
# Check if error is retryable (timeout, connection, server busy, etc.)
is_retryable = any(keyword in error_str_lower for keyword in [
'timeout', 'timed out', 'connection', 'network',
'rate limit', 'overloaded', 'unavailable', 'busy', 'retry',
'429', '500', '502', '503', '504', '512'
])
if is_retryable and retry_count < max_retries:
# Rate limit needs longer wait time
if is_rate_limit:
wait_time = 30 + (retry_count * 15) # 30s, 45s, 60s for rate limit
else:
wait_time = (retry_count + 1) * 2 # 2s, 4s, 6s for other errors
logger.warning(f"⚠️ LLM API error (attempt {retry_count + 1}/{max_retries}): {e}")
logger.info(f"Retrying in {wait_time}s...")
time.sleep(wait_time)
return self._call_llm_stream(
retry_on_empty=retry_on_empty,
retry_count=retry_count + 1,
max_retries=max_retries
)
else:
if retry_count >= max_retries:
logger.error(f"❌ LLM API error after {max_retries} retries: {e}", exc_info=True)
else:
logger.error(f"❌ LLM call error (non-retryable): {e}", exc_info=True)
raise
# Parse tool calls
tool_calls = []
for idx in sorted(tool_calls_buffer.keys()):
tc = tool_calls_buffer[idx]
# Ensure tool call has a valid ID (some providers return empty/None IDs)
tool_id = tc.get("id") or ""
if not tool_id:
import uuid
tool_id = f"call_{uuid.uuid4().hex[:24]}"
try:
# Safely get arguments, handle None case
args_str = tc.get("arguments") or ""
arguments = json.loads(args_str) if args_str else {}
except json.JSONDecodeError as e:
# Handle None or invalid arguments safely
args_str = tc.get('arguments') or ""
args_preview = args_str[:200] if len(args_str) > 200 else args_str
logger.error(f"Failed to parse tool arguments for {tc['name']}")
logger.error(f"Arguments length: {len(args_str)} chars")
logger.error(f"Arguments preview: {args_preview}...")
logger.error(f"JSON decode error: {e}")
# Return a clear error message to the LLM instead of empty dict
# This helps the LLM understand what went wrong
tool_calls.append({
"id": tool_id,
"name": tc["name"],
"arguments": {},
"_parse_error": f"Invalid JSON in tool arguments: {args_preview}... Error: {str(e)}. Tip: For large content, consider splitting into smaller chunks or using a different approach."
})
continue
tool_calls.append({
"id": tool_id,
"name": tc["name"],
"arguments": arguments
})
# Check for empty response and retry once if enabled
if retry_on_empty and not full_content and not tool_calls:
logger.warning(f"⚠️ LLM returned empty response (stop_reason: {stop_reason}), retrying once...")
self._emit_event("message_end", {
"content": "",
"tool_calls": [],
"empty_retry": True,
"stop_reason": stop_reason
})
# Retry without retry flag to avoid infinite loop
return self._call_llm_stream(
retry_on_empty=False,
retry_count=retry_count,
max_retries=max_retries
)
# Filter full_content one more time (in case tags were split across chunks)
full_content = self._filter_think_tags(full_content)
# Add assistant message to history (Claude format uses content blocks)
assistant_msg = {"role": "assistant", "content": []}
# Add text content block if present
if full_content:
assistant_msg["content"].append({
"type": "text",
"text": full_content
})
# Add tool_use blocks if present
if tool_calls:
for tc in tool_calls:
assistant_msg["content"].append({
"type": "tool_use",
"id": tc.get("id", ""),
"name": tc.get("name", ""),
"input": tc.get("arguments", {})
})
if gemini_raw_parts:
assistant_msg["_gemini_raw_parts"] = gemini_raw_parts
# Only append if content is not empty
if assistant_msg["content"]:
self.messages.append(assistant_msg)
self._emit_event("message_end", {
"content": full_content,
"tool_calls": tool_calls
})
return full_content, tool_calls
def _execute_tool(self, tool_call: Dict) -> Dict[str, Any]:
tool_name = tool_call["name"]
tool_id = tool_call["id"]
arguments = tool_call["arguments"]
# Check if there was a JSON parse error
if "_parse_error" in tool_call:
parse_error = tool_call["_parse_error"]
logger.error(f"Skipping tool execution due to parse error: {parse_error}")
result = {
"status": "error",
"result": f"Failed to parse tool arguments. {parse_error}. Please ensure your tool call uses valid JSON format with all required parameters.",
"execution_time": 0
}
self._record_tool_result(tool_name, arguments, False)
return result
# Check for consecutive failures (retry protection)
should_stop, stop_reason, is_critical = self._check_consecutive_failures(tool_name, arguments)
if should_stop:
logger.error(f"🛑 {stop_reason}")
self._record_tool_result(tool_name, arguments, False)
if is_critical:
# Critical failure - abort entire conversation
result = {
"status": "critical_error",
"result": stop_reason,
"execution_time": 0
}
else:
# Normal failure - let LLM try different approach
result = {
"status": "error",
"result": f"{stop_reason}\n\n当前方法行不通,请尝试完全不同的方法或向用户询问更多信息。",
"execution_time": 0
}
return result
self._emit_event("tool_execution_start", {
"tool_call_id": tool_id,
"tool_name": tool_name,
"arguments": arguments
})
try:
tool = self.tools.get(tool_name)
if not tool:
raise ValueError(self._build_tool_not_found_message(tool_name))
# Set tool context
tool.model = self.model
tool.context = self.agent
# Execute tool
start_time = time.time()
result: ToolResult = tool.execute_tool(arguments)
execution_time = time.time() - start_time
result_dict = {
"status": result.status,
"result": result.result,
"execution_time": execution_time
}
# Record tool result for failure tracking
success = result.status == "success"
self._record_tool_result(tool_name, arguments, success)
# Auto-refresh skills after skill creation
if tool_name == "bash" and result.status == "success":
command = arguments.get("command", "")
if "init_skill.py" in command and self.agent.skill_manager:
logger.info("Detected skill creation, refreshing skills...")
self.agent.refresh_skills()
logger.info(f"Skills refreshed! Now have {len(self.agent.skill_manager.skills)} skills")
self._emit_event("tool_execution_end", {
"tool_call_id": tool_id,
"tool_name": tool_name,
**result_dict
})
return result_dict
except Exception as e:
logger.error(f"Tool execution error: {e}")
error_result = {
"status": "error",
"result": str(e),
"execution_time": 0
}
# Record failure
self._record_tool_result(tool_name, arguments, False)
self._emit_event("tool_execution_end", {
"tool_call_id": tool_id,
"tool_name": tool_name,
**error_result
})
return error_result
def _build_tool_not_found_message(self, tool_name: str) -> str:
available_tools = list(self.tools.keys())
base_msg = f"Tool '{tool_name}' not found. Available tools: {available_tools}"
skill_manager = getattr(self.agent, 'skill_manager', None)
if not skill_manager:
return base_msg
skill_entry = skill_manager.get_skill(tool_name)
if not skill_entry:
return base_msg
skill = skill_entry.skill
skill_md_path = skill.file_path
skill_content = ""
try:
with open(skill_md_path, 'r', encoding='utf-8') as f:
skill_content = f.read()
except Exception:
skill_content = skill.description
logger.info(
f"[Agent] Tool '{tool_name}' not found, but matched skill '{skill.name}'. "
f"Guiding LLM to use the skill instead."
)
return (
f"Tool '{tool_name}' is not a built-in tool, but a matching skill "
f"'{skill.name}' is available. You should use existing tools (e.g. bash with curl) "
f"to accomplish this task following the skill instructions below:\n\n"
f"--- SKILL: {skill.name} (path: {skill_md_path}) ---\n"
f"{skill_content}\n"
f"--- END SKILL ---\n\n"
f"Available tools: {available_tools}"
)
def _validate_and_fix_messages(self):
sanitize_claude_messages(self.messages)
def _identify_complete_turns(self) -> List[Dict]:
turns = []
current_turn = {'messages': []}
for msg in self.messages:
role = msg.get('role')
content = msg.get('content', [])
if role == 'user':
# Determine if this is a real user query (not a tool_result injection
# or an internal hint message injected by the agent loop).
is_user_query = False
has_tool_result = False
if isinstance(content, list):
has_text = any(
isinstance(block, dict) and block.get('type') == 'text'
for block in content
)
has_tool_result = any(
isinstance(block, dict) and block.get('type') == 'tool_result'
for block in content
)
# A message with tool_result is always internal, even if it
# also contains text blocks (shouldn't happen, but be safe).
is_user_query = has_text and not has_tool_result
elif isinstance(content, str):
is_user_query = True
if is_user_query:
if current_turn['messages']:
turns.append(current_turn)
current_turn = {'messages': [msg]}
else:
current_turn['messages'].append(msg)
else:
# AI 回复,属于当前轮次
current_turn['messages'].append(msg)
# 添加最后一个轮次
if current_turn['messages']:
turns.append(current_turn)
return turns
def _estimate_turn_tokens(self, turn: Dict) -> int:
return sum(
self.agent._estimate_message_tokens(msg)
for msg in turn['messages']
)
def _truncate_historical_tool_results(self):
MAX_HISTORY_RESULT_CHARS = 20000
if len(self.messages) < 2:
return
# Find where the last user text message starts (= current turn boundary)
# We skip the current turn's messages to preserve their full content
current_turn_start = len(self.messages)
for i in range(len(self.messages) - 1, -1, -1):
msg = self.messages[i]
if msg.get("role") == "user":
content = msg.get("content", [])
if isinstance(content, list) and any(
isinstance(b, dict) and b.get("type") == "text" for b in content
):
current_turn_start = i
break
elif isinstance(content, str):
current_turn_start = i
break
truncated_count = 0
for i in range(current_turn_start):
msg = self.messages[i]
if msg.get("role") != "user":
continue
content = msg.get("content", [])
if not isinstance(content, list):
continue
for block in content:
if not isinstance(block, dict) or block.get("type") != "tool_result":
continue
result_str = block.get("content", "")
if isinstance(result_str, str) and len(result_str) > MAX_HISTORY_RESULT_CHARS:
original_len = len(result_str)
block["content"] = result_str[:MAX_HISTORY_RESULT_CHARS] + \
f"\n\n[Historical output truncated: {original_len} -> {MAX_HISTORY_RESULT_CHARS} chars]"
truncated_count += 1
if truncated_count > 0:
logger.info(f"📎 Truncated {truncated_count} historical tool result(s) to {MAX_HISTORY_RESULT_CHARS} chars")
def _aggressive_trim_for_overflow(self) -> bool:
if not self.messages:
return False
original_count = len(self.messages)
# Step 1: Aggressively truncate ALL tool results to 10K chars
AGGRESSIVE_LIMIT = 10000
truncated = 0
for msg in self.messages:
content = msg.get("content", [])
if not isinstance(content, list):
continue
for block in content:
if not isinstance(block, dict):
continue
# Truncate tool_result blocks
if block.get("type") == "tool_result":
result_str = block.get("content", "")
if isinstance(result_str, str) and len(result_str) > AGGRESSIVE_LIMIT:
block["content"] = (
result_str[:AGGRESSIVE_LIMIT]
+ f"\n\n[Truncated for context recovery: "
f"{len(result_str)} -> {AGGRESSIVE_LIMIT} chars]"
)
truncated += 1
# Truncate tool_use input blocks (e.g. large write content)
if block.get("type") == "tool_use" and isinstance(block.get("input"), dict):
input_str = json.dumps(block["input"], ensure_ascii=False)
if len(input_str) > AGGRESSIVE_LIMIT:
# Keep only a summary of the input
for key, val in block["input"].items():
if isinstance(val, str) and len(val) > 1000:
block["input"][key] = (
val[:1000]
+ f"... [truncated {len(val)} chars]"
)
truncated += 1
# Step 2: Truncate overly long user text messages (e.g. pasted content)
USER_MSG_LIMIT = 10000
for msg in self.messages:
if msg.get("role") != "user":
continue
content = msg.get("content", [])
if isinstance(content, list):
for block in content:
if isinstance(block, dict) and block.get("type") == "text":
text = block.get("text", "")
if len(text) > USER_MSG_LIMIT:
block["text"] = (
text[:USER_MSG_LIMIT]
+ f"\n\n[Message truncated for context recovery: "
f"{len(text)} -> {USER_MSG_LIMIT} chars]"
)
truncated += 1
elif isinstance(content, str) and len(content) > USER_MSG_LIMIT:
msg["content"] = (
content[:USER_MSG_LIMIT]
+ f"\n\n[Message truncated for context recovery: "
f"{len(content)} -> {USER_MSG_LIMIT} chars]"
)
truncated += 1
# Step 3: Keep only the last 5 complete turns
turns = self._identify_complete_turns()
if len(turns) > 5:
kept_turns = turns[-5:]
new_messages = []
for turn in kept_turns:
new_messages.extend(turn["messages"])
removed = len(turns) - 5
self.messages[:] = new_messages
logger.info(
f"🔧 Aggressive trim: removed {removed} old turns, "
f"truncated {truncated} large blocks, "
f"{original_count} -> {len(self.messages)} messages"
)
return True
if truncated > 0:
logger.info(
f"🔧 Aggressive trim: truncated {truncated} large blocks "
f"(no turns removed, only {len(turns)} turn(s) left)"
)
return True
# Nothing left to trim
logger.warning("🔧 Aggressive trim: nothing to trim, will clear history")
return False
def _trim_messages(self):
if not self.messages or not self.agent:
return
# Step 0: Truncate large tool results in historical turns (30K -> 20K)
self._truncate_historical_tool_results()
# Step 1: 识别完整轮次
turns = self._identify_complete_turns()
if not turns:
return
# Step 2: 轮次限制 - 超出时移除前一半,保留后一半
if len(turns) > self.max_context_turns:
removed_count = len(turns) // 2
keep_count = len(turns) - removed_count
# Flush discarded turns to daily memory
if self.agent.memory_manager:
discarded_messages = []
for turn in turns[:removed_count]:
discarded_messages.extend(turn["messages"])
if discarded_messages:
user_id = getattr(self.agent, '_current_user_id', None)
self.agent.memory_manager.flush_memory(
messages=discarded_messages, user_id=user_id,
reason="trim", max_messages=0
)
turns = turns[-keep_count:]
logger.info(
f"💾 上下文轮次超限: {keep_count + removed_count} > {self.max_context_turns},"
f"裁剪至 {keep_count} 轮(移除 {removed_count} 轮)"
)
# Step 3: Token 限制 - 保留完整轮次
# Get context window from agent (based on model)
context_window = self.agent._get_model_context_window()
# Use configured max_context_tokens if available
if hasattr(self.agent, 'max_context_tokens') and self.agent.max_context_tokens:
max_tokens = self.agent.max_context_tokens
else:
# Reserve 10% for response generation
reserve_tokens = int(context_window * 0.1)
max_tokens = context_window - reserve_tokens
# Estimate system prompt tokens
system_tokens = self.agent._estimate_message_tokens({"role": "system", "content": self.system_prompt})
available_tokens = max_tokens - system_tokens
# Calculate current tokens
current_tokens = sum(self._estimate_turn_tokens(turn) for turn in turns)
# If under limit, reconstruct messages and return
if current_tokens + system_tokens <= max_tokens:
# Reconstruct message list from turns
new_messages = []
for turn in turns:
new_messages.extend(turn['messages'])
old_count = len(self.messages)
self.messages = new_messages
# Log if we removed messages due to turn limit
if old_count > len(self.messages):
logger.info(f" 重建消息列表: {old_count} -> {len(self.messages)} 条消息")
return
# Token limit exceeded — tiered strategy based on turn count:
#
# Few turns (<5): Compress ALL turns to text-only (strip tool chains,
# keep user query + final reply). Never discard turns
# — losing even one is too painful when context is thin.
#
# Many turns (>=5): Directly discard the first half of turns.
# With enough turns the oldest ones are less
# critical, and keeping the recent half intact
# (with full tool chains) is more useful.
COMPRESS_THRESHOLD = 5
if len(turns) < COMPRESS_THRESHOLD:
# --- Few turns: compress ALL turns to text-only, never discard ---
compressed_turns = []
for t in turns:
compressed = compress_turn_to_text_only(t)
if compressed["messages"]:
compressed_turns.append(compressed)
new_messages = []
for turn in compressed_turns:
new_messages.extend(turn["messages"])
new_tokens = sum(self._estimate_turn_tokens(t) for t in compressed_turns)
old_count = len(self.messages)
self.messages = new_messages
logger.info(
f"📦 上下文tokens超限(轮次<{COMPRESS_THRESHOLD}): "
f"~{current_tokens + system_tokens} > {max_tokens},"
f"压缩全部 {len(turns)} 轮为纯文本 "
f"({old_count} -> {len(self.messages)} 条消息,"
f"~{current_tokens + system_tokens} -> ~{new_tokens + system_tokens} tokens)"
)
return
# --- Many turns (>=5): discard the older half, keep the newer half ---
removed_count = len(turns) // 2
keep_count = len(turns) - removed_count
kept_turns = turns[-keep_count:]
kept_tokens = sum(self._estimate_turn_tokens(t) for t in kept_turns)
logger.info(
f"🔄 上下文tokens超限: ~{current_tokens + system_tokens} > {max_tokens},"
f"裁剪至 {keep_count} 轮(移除 {removed_count} 轮)"
)
if self.agent.memory_manager:
discarded_messages = []
for turn in turns[:removed_count]:
discarded_messages.extend(turn["messages"])
if discarded_messages:
user_id = getattr(self.agent, '_current_user_id', None)
self.agent.memory_manager.flush_memory(
messages=discarded_messages, user_id=user_id,
reason="trim", max_messages=0
)
new_messages = []
for turn in kept_turns:
new_messages.extend(turn['messages'])
old_count = len(self.messages)
self.messages = new_messages
logger.info(
f" 移除了 {removed_count} 轮对话 "
f"({old_count} -> {len(self.messages)} 条消息,"
f"~{current_tokens + system_tokens} -> ~{kept_tokens + system_tokens} tokens)"
)
def _clear_session_db(self):
try:
session_id = getattr(self.agent, '_current_session_id', None)
if not session_id:
return
from agent.memory import get_conversation_store
store = get_conversation_store()
store.clear_session(session_id)
logger.info(f"🗑️ Cleared dirty session data from DB: {session_id}")
except Exception as e:
logger.warning(f"Failed to clear session DB: {e}")
def _prepare_messages(self) -> List[Dict[str, Any]]:
# Don't add system message here - it will be handled separately by the LLM adapter
return self.messages | --- +++ @@ -1,3 +1,8 @@+"""
+Agent Stream Execution Module - Multi-turn reasoning based on tool-call
+
+Provides streaming output, event system, and complete tool-call loop
+"""
import json
import time
from typing import List, Dict, Any, Optional, Callable, Tuple
@@ -9,6 +14,15 @@
class AgentStreamExecutor:
+ """
+ Agent Stream Executor
+
+ Handles multi-turn reasoning loop based on tool-call:
+ 1. LLM generates response (may include tool calls)
+ 2. Execute tools
+ 3. Return results to LLM
+ 4. Repeat until no more tool calls
+ """
def __init__(
self,
@@ -21,6 +35,19 @@ messages: Optional[List[Dict]] = None,
max_context_turns: int = 30
):
+ """
+ Initialize stream executor
+
+ Args:
+ agent: Agent instance (for accessing context)
+ model: LLM model
+ system_prompt: System prompt
+ tools: List of available tools
+ max_turns: Maximum number of turns
+ on_event: Event callback function
+ messages: Optional existing message history (for persistent conversations)
+ max_context_turns: Maximum number of conversation turns to keep in context
+ """
self.agent = agent
self.model = model
self.system_prompt = system_prompt
@@ -40,6 +67,7 @@ self.files_to_send = [] # List of file metadata dicts
def _emit_event(self, event_type: str, data: dict = None):
+ """Emit event"""
if self.on_event:
try:
self.on_event({
@@ -51,6 +79,11 @@ logger.error(f"Event callback error: {e}")
def _filter_think_tags(self, text: str) -> str:
+ """
+ Remove <think> and </think> tags but keep the content inside.
+ Some LLM providers (e.g., MiniMax) may return thinking process wrapped in <think> tags.
+ We only remove the tags themselves, keeping the actual thinking content.
+ """
if not text:
return text
import re
@@ -60,12 +93,22 @@ return text
def _hash_args(self, args: dict) -> str:
+ """Generate a simple hash for tool arguments"""
import hashlib
# Sort keys for consistent hashing
args_str = json.dumps(args, sort_keys=True, ensure_ascii=False)
return hashlib.md5(args_str.encode()).hexdigest()[:8]
def _check_consecutive_failures(self, tool_name: str, args: dict) -> Tuple[bool, str, bool]:
+ """
+ Check if tool has failed too many times consecutively or called repeatedly with same args
+
+ Returns:
+ (should_stop, reason, is_critical)
+ - should_stop: Whether to stop tool execution
+ - reason: Reason for stopping
+ - is_critical: Whether to abort entire conversation (True for 8+ failures)
+ """
args_hash = self._hash_args(args)
# Count consecutive calls (both success and failure) for same tool + args
@@ -117,6 +160,7 @@ return False, "", False
def _record_tool_result(self, tool_name: str, args: dict, success: bool):
+ """Record tool execution result for failure tracking"""
args_hash = self._hash_args(args)
self.tool_failure_history.append((tool_name, args_hash, success))
# Keep only last 50 records to avoid memory bloat
@@ -124,6 +168,15 @@ self.tool_failure_history = self.tool_failure_history[-50:]
def run_stream(self, user_message: str) -> str:
+ """
+ Execute streaming reasoning loop
+
+ Args:
+ user_message: User message
+
+ Returns:
+ Final response text
+ """
# Log user message with model info
logger.info(f"🤖 {self.model.model} | 👤 {user_message}")
@@ -426,6 +479,18 @@
def _call_llm_stream(self, retry_on_empty=True, retry_count=0, max_retries=3,
_overflow_retry: bool = False) -> Tuple[str, List[Dict]]:
+ """
+ Call LLM with streaming and automatic retry on errors
+
+ Args:
+ retry_on_empty: Whether to retry once if empty response is received
+ retry_count: Current retry attempt (internal use)
+ max_retries: Maximum number of retries for API errors
+ _overflow_retry: Internal flag indicating this is a retry after context overflow
+
+ Returns:
+ (response_text, tool_calls)
+ """
# Validate and fix message history (e.g. orphaned tool_result blocks).
# Context trimming is done once in run_stream() before the loop starts,
# NOT here — trimming mid-execution would strip the current run's
@@ -754,6 +819,15 @@ return full_content, tool_calls
def _execute_tool(self, tool_call: Dict) -> Dict[str, Any]:
+ """
+ Execute tool
+
+ Args:
+ tool_call: {"id": str, "name": str, "arguments": dict}
+
+ Returns:
+ Tool execution result
+ """
tool_name = tool_call["name"]
tool_id = tool_call["id"]
arguments = tool_call["arguments"]
@@ -856,6 +930,11 @@ return error_result
def _build_tool_not_found_message(self, tool_name: str) -> str:
+ """Build a helpful error message when a tool is not found.
+
+ If a skill with the same name exists in skill_manager, read its
+ SKILL.md and include the content so the LLM knows how to use it.
+ """
available_tools = list(self.tools.keys())
base_msg = f"Tool '{tool_name}' not found. Available tools: {available_tools}"
@@ -892,9 +971,22 @@ )
def _validate_and_fix_messages(self):
+ """Delegate to the shared sanitizer (see message_sanitizer.py)."""
sanitize_claude_messages(self.messages)
def _identify_complete_turns(self) -> List[Dict]:
+ """
+ 识别完整的对话轮次
+
+ 一个完整轮次包括:
+ 1. 用户消息(text)
+ 2. AI 回复(可能包含 tool_use)
+ 3. 工具结果(tool_result,如果有)
+ 4. 后续 AI 回复(如果有)
+
+ Returns:
+ List of turns, each turn is a dict with 'messages' list
+ """
turns = []
current_turn = {'messages': []}
@@ -939,12 +1031,21 @@ return turns
def _estimate_turn_tokens(self, turn: Dict) -> int:
+ """估算一个轮次的 tokens"""
return sum(
self.agent._estimate_message_tokens(msg)
for msg in turn['messages']
)
def _truncate_historical_tool_results(self):
+ """
+ Truncate tool_result content in historical messages to reduce context size.
+
+ Current turn results are kept at 30K chars (truncated at creation time).
+ Historical turn results are further truncated to 10K chars here.
+ This runs before token-based trimming so that we first shrink oversized
+ results, potentially avoiding the need to drop entire turns.
+ """
MAX_HISTORY_RESULT_CHARS = 20000
if len(self.messages) < 2:
@@ -989,6 +1090,17 @@ logger.info(f"📎 Truncated {truncated_count} historical tool result(s) to {MAX_HISTORY_RESULT_CHARS} chars")
def _aggressive_trim_for_overflow(self) -> bool:
+ """
+ Aggressively trim context when a real overflow error is returned by the API.
+
+ This method goes beyond normal _trim_messages by:
+ 1. Truncating all tool results (including current turn) to a small limit
+ 2. Keeping only the last 5 complete conversation turns
+ 3. Truncating overly long user messages
+
+ Returns:
+ True if messages were trimmed (worth retrying), False if nothing left to trim
+ """
if not self.messages:
return False
@@ -1080,6 +1192,14 @@ return False
def _trim_messages(self):
+ """
+ 智能清理消息历史,保持对话完整性
+
+ 使用完整轮次作为清理单位,确保:
+ 1. 不会在对话中间截断
+ 2. 工具调用链(tool_use + tool_result)保持完整
+ 3. 每轮对话都是完整的(用户消息 + AI回复 + 工具调用)
+ """
if not self.messages or not self.agent:
return
@@ -1224,6 +1344,12 @@ )
def _clear_session_db(self):
+ """
+ Clear the current session's persisted messages from SQLite DB.
+
+ This prevents dirty data (broken tool_use/tool_result pairs) from being
+ reloaded on the next request or after a restart.
+ """
try:
session_id = getattr(self.agent, '_current_session_id', None)
if not session_id:
@@ -1236,5 +1362,11 @@ logger.warning(f"Failed to clear session DB: {e}")
def _prepare_messages(self) -> List[Dict[str, Any]]:
+ """
+ Prepare messages to send to LLM
+
+ Note: For Claude API, system prompt should be passed separately via system parameter,
+ not as a message. The AgentLLMModel will handle this.
+ """
# Don't add system message here - it will be handled separately by the LLM adapter
return self.messages | https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/protocol/agent_stream.py |
Add standardized docstrings across the file | import os
import requests
from bridge.context import ContextType
from channel.chat_message import ChatMessage
from common.log import logger
from common.utils import expand_path
from config import conf
def _get_tmp_dir() -> str:
ws_root = expand_path(conf().get("agent_workspace", "~/cow"))
tmp_dir = os.path.join(ws_root, "tmp")
os.makedirs(tmp_dir, exist_ok=True)
return tmp_dir
class QQMessage(ChatMessage):
def __init__(self, event_data: dict, event_type: str):
super().__init__(event_data)
self.msg_id = event_data.get("id", "")
self.create_time = event_data.get("timestamp", "")
self.is_group = event_type in ("GROUP_AT_MESSAGE_CREATE",)
self.event_type = event_type
author = event_data.get("author", {})
from_user_id = author.get("member_openid", "") or author.get("id", "")
group_openid = event_data.get("group_openid", "")
content = event_data.get("content", "").strip()
attachments = event_data.get("attachments", [])
has_image = any(
a.get("content_type", "").startswith("image/") for a in attachments
) if attachments else False
if has_image and not content:
self.ctype = ContextType.IMAGE
img_attachment = next(
a for a in attachments if a.get("content_type", "").startswith("image/")
)
img_url = img_attachment.get("url", "")
if img_url and not img_url.startswith("http"):
img_url = "https://" + img_url
tmp_dir = _get_tmp_dir()
image_path = os.path.join(tmp_dir, f"qq_{self.msg_id}.png")
try:
resp = requests.get(img_url, timeout=30)
resp.raise_for_status()
with open(image_path, "wb") as f:
f.write(resp.content)
self.content = image_path
self.image_path = image_path
logger.info(f"[QQ] Image downloaded: {image_path}")
except Exception as e:
logger.error(f"[QQ] Failed to download image: {e}")
self.content = "[Image download failed]"
self.image_path = None
elif has_image and content:
self.ctype = ContextType.TEXT
image_paths = []
tmp_dir = _get_tmp_dir()
for idx, att in enumerate(attachments):
if not att.get("content_type", "").startswith("image/"):
continue
img_url = att.get("url", "")
if img_url and not img_url.startswith("http"):
img_url = "https://" + img_url
img_path = os.path.join(tmp_dir, f"qq_{self.msg_id}_{idx}.png")
try:
resp = requests.get(img_url, timeout=30)
resp.raise_for_status()
with open(img_path, "wb") as f:
f.write(resp.content)
image_paths.append(img_path)
except Exception as e:
logger.error(f"[QQ] Failed to download mixed image: {e}")
content_parts = [content]
for p in image_paths:
content_parts.append(f"[图片: {p}]")
self.content = "\n".join(content_parts)
else:
self.ctype = ContextType.TEXT
self.content = content
if event_type == "GROUP_AT_MESSAGE_CREATE":
self.from_user_id = from_user_id
self.to_user_id = ""
self.other_user_id = group_openid
self.actual_user_id = from_user_id
self.actual_user_nickname = from_user_id
elif event_type == "C2C_MESSAGE_CREATE":
user_openid = author.get("user_openid", "") or from_user_id
self.from_user_id = user_openid
self.to_user_id = ""
self.other_user_id = user_openid
self.actual_user_id = user_openid
elif event_type == "AT_MESSAGE_CREATE":
self.from_user_id = from_user_id
self.to_user_id = ""
channel_id = event_data.get("channel_id", "")
self.other_user_id = channel_id
self.actual_user_id = from_user_id
self.actual_user_nickname = author.get("username", from_user_id)
elif event_type == "DIRECT_MESSAGE_CREATE":
self.from_user_id = from_user_id
self.to_user_id = ""
guild_id = event_data.get("guild_id", "")
self.other_user_id = f"dm_{guild_id}_{from_user_id}"
self.actual_user_id = from_user_id
self.actual_user_nickname = author.get("username", from_user_id)
else:
raise NotImplementedError(f"Unsupported QQ event type: {event_type}")
logger.debug(f"[QQ] Message parsed: type={event_type}, ctype={self.ctype}, "
f"from={self.from_user_id}, content_len={len(self.content)}") | --- +++ @@ -9,6 +9,7 @@
def _get_tmp_dir() -> str:
+ """Return the workspace tmp directory (absolute path), creating it if needed."""
ws_root = expand_path(conf().get("agent_workspace", "~/cow"))
tmp_dir = os.path.join(ws_root, "tmp")
os.makedirs(tmp_dir, exist_ok=True)
@@ -16,6 +17,7 @@
class QQMessage(ChatMessage):
+ """Message wrapper for QQ Bot (websocket long-connection mode)."""
def __init__(self, event_data: dict, event_type: str):
super().__init__(event_data)
@@ -118,4 +120,4 @@ raise NotImplementedError(f"Unsupported QQ event type: {event_type}")
logger.debug(f"[QQ] Message parsed: type={event_type}, ctype={self.ctype}, "
- f"from={self.from_user_id}, content_len={len(self.content)}")
+ f"from={self.from_user_id}, content_len={len(self.content)}")
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/channel/qq/qq_message.py |
Add concise docstrings to each method | # encoding:utf-8
import os
import signal
import sys
import time
from channel import channel_factory
from common import const
from common.log import logger
from config import load_config, conf
from plugins import *
import threading
_channel_mgr = None
def get_channel_manager():
return _channel_mgr
def _parse_channel_type(raw) -> list:
if isinstance(raw, list):
return [ch.strip() for ch in raw if ch.strip()]
if isinstance(raw, str):
return [ch.strip() for ch in raw.split(",") if ch.strip()]
return []
class ChannelManager:
def __init__(self):
self._channels = {} # channel_name -> channel instance
self._threads = {} # channel_name -> thread
self._primary_channel = None
self._lock = threading.Lock()
self.cloud_mode = False # set to True when cloud client is active
@property
def channel(self):
return self._primary_channel
def get_channel(self, channel_name: str):
return self._channels.get(channel_name)
def start(self, channel_names: list, first_start: bool = False):
with self._lock:
channels = []
for name in channel_names:
ch = channel_factory.create_channel(name)
ch.cloud_mode = self.cloud_mode
self._channels[name] = ch
channels.append((name, ch))
if self._primary_channel is None and name != "web":
self._primary_channel = ch
if self._primary_channel is None and channels:
self._primary_channel = channels[0][1]
if first_start:
PluginManager().load_plugins()
if conf().get("use_linkai"):
try:
from common import cloud_client
threading.Thread(
target=cloud_client.start,
args=(self._primary_channel, self),
daemon=True,
).start()
except Exception:
pass
# Start web console first so its logs print cleanly,
# then start remaining channels after a brief pause.
web_entry = None
other_entries = []
for entry in channels:
if entry[0] == "web":
web_entry = entry
else:
other_entries.append(entry)
ordered = ([web_entry] if web_entry else []) + other_entries
for i, (name, ch) in enumerate(ordered):
if i > 0 and name != "web":
time.sleep(0.1)
t = threading.Thread(target=self._run_channel, args=(name, ch), daemon=True)
self._threads[name] = t
t.start()
logger.debug(f"[ChannelManager] Channel '{name}' started in sub-thread")
def _run_channel(self, name: str, channel):
try:
channel.startup()
except Exception as e:
logger.error(f"[ChannelManager] Channel '{name}' startup error: {e}")
logger.exception(e)
def stop(self, channel_name: str = None):
# Pop under lock, then stop outside lock to avoid deadlock
with self._lock:
names = [channel_name] if channel_name else list(self._channels.keys())
to_stop = []
for name in names:
ch = self._channels.pop(name, None)
th = self._threads.pop(name, None)
to_stop.append((name, ch, th))
if channel_name and self._primary_channel is self._channels.get(channel_name):
self._primary_channel = None
for name, ch, th in to_stop:
if ch is None:
logger.warning(f"[ChannelManager] Channel '{name}' not found in managed channels")
if th and th.is_alive():
self._interrupt_thread(th, name)
continue
logger.info(f"[ChannelManager] Stopping channel '{name}'...")
graceful = False
if hasattr(ch, 'stop'):
try:
ch.stop()
graceful = True
except Exception as e:
logger.warning(f"[ChannelManager] Error during channel '{name}' stop: {e}")
if th and th.is_alive():
th.join(timeout=5)
if th.is_alive():
if graceful:
logger.info(f"[ChannelManager] Channel '{name}' thread still alive after stop(), "
"leaving daemon thread to finish on its own")
else:
logger.warning(f"[ChannelManager] Channel '{name}' thread did not exit in 5s, forcing interrupt")
self._interrupt_thread(th, name)
@staticmethod
def _interrupt_thread(th: threading.Thread, name: str):
import ctypes
try:
tid = th.ident
if tid is None:
return
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_ulong(tid), ctypes.py_object(SystemExit)
)
if res == 1:
logger.info(f"[ChannelManager] Interrupted thread for channel '{name}'")
elif res > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_ulong(tid), None)
logger.warning(f"[ChannelManager] Failed to interrupt thread for channel '{name}'")
except Exception as e:
logger.warning(f"[ChannelManager] Thread interrupt error for '{name}': {e}")
def restart(self, new_channel_name: str):
logger.info(f"[ChannelManager] Restarting channel to '{new_channel_name}'...")
self.stop(new_channel_name)
_clear_singleton_cache(new_channel_name)
time.sleep(1)
self.start([new_channel_name], first_start=False)
logger.info(f"[ChannelManager] Channel restarted to '{new_channel_name}' successfully")
def add_channel(self, channel_name: str):
with self._lock:
if channel_name in self._channels:
logger.info(f"[ChannelManager] Channel '{channel_name}' already exists, restarting")
if self._channels.get(channel_name):
self.restart(channel_name)
return
logger.info(f"[ChannelManager] Adding channel '{channel_name}'...")
_clear_singleton_cache(channel_name)
self.start([channel_name], first_start=False)
logger.info(f"[ChannelManager] Channel '{channel_name}' added successfully")
def remove_channel(self, channel_name: str):
with self._lock:
if channel_name not in self._channels:
logger.warning(f"[ChannelManager] Channel '{channel_name}' not found, nothing to remove")
return
logger.info(f"[ChannelManager] Removing channel '{channel_name}'...")
self.stop(channel_name)
logger.info(f"[ChannelManager] Channel '{channel_name}' removed successfully")
def _clear_singleton_cache(channel_name: str):
cls_map = {
"web": "channel.web.web_channel.WebChannel",
"wechatmp": "channel.wechatmp.wechatmp_channel.WechatMPChannel",
"wechatmp_service": "channel.wechatmp.wechatmp_channel.WechatMPChannel",
"wechatcom_app": "channel.wechatcom.wechatcomapp_channel.WechatComAppChannel",
const.FEISHU: "channel.feishu.feishu_channel.FeiShuChanel",
const.DINGTALK: "channel.dingtalk.dingtalk_channel.DingTalkChanel",
const.WECOM_BOT: "channel.wecom_bot.wecom_bot_channel.WecomBotChannel",
const.QQ: "channel.qq.qq_channel.QQChannel",
}
module_path = cls_map.get(channel_name)
if not module_path:
return
try:
parts = module_path.rsplit(".", 1)
module_name, class_name = parts[0], parts[1]
import importlib
module = importlib.import_module(module_name)
wrapper = getattr(module, class_name, None)
if wrapper and hasattr(wrapper, '__closure__') and wrapper.__closure__:
for cell in wrapper.__closure__:
try:
cell_contents = cell.cell_contents
if isinstance(cell_contents, dict):
cell_contents.clear()
logger.debug(f"[ChannelManager] Cleared singleton cache for {class_name}")
break
except ValueError:
pass
except Exception as e:
logger.warning(f"[ChannelManager] Failed to clear singleton cache: {e}")
def sigterm_handler_wrap(_signo):
old_handler = signal.getsignal(_signo)
def func(_signo, _stack_frame):
logger.info("signal {} received, exiting...".format(_signo))
conf().save_user_datas()
if callable(old_handler): # check old_handler
return old_handler(_signo, _stack_frame)
sys.exit(0)
signal.signal(_signo, func)
def run():
global _channel_mgr
try:
# load config
load_config()
# ctrl + c
sigterm_handler_wrap(signal.SIGINT)
# kill signal
sigterm_handler_wrap(signal.SIGTERM)
# Parse channel_type into a list
raw_channel = conf().get("channel_type", "web")
if "--cmd" in sys.argv:
channel_names = ["terminal"]
else:
channel_names = _parse_channel_type(raw_channel)
if not channel_names:
channel_names = ["web"]
# Auto-start web console unless explicitly disabled
web_console_enabled = conf().get("web_console", True)
if web_console_enabled and "web" not in channel_names:
channel_names.append("web")
logger.info(f"[App] Starting channels: {channel_names}")
_channel_mgr = ChannelManager()
_channel_mgr.start(channel_names, first_start=True)
while True:
time.sleep(1)
except Exception as e:
logger.error("App startup failed!")
logger.exception(e)
if __name__ == "__main__":
run() | --- +++ @@ -21,6 +21,13 @@
def _parse_channel_type(raw) -> list:
+ """
+ Parse channel_type config value into a list of channel names.
+ Supports:
+ - single string: "feishu"
+ - comma-separated string: "feishu, dingtalk"
+ - list: ["feishu", "dingtalk"]
+ """
if isinstance(raw, list):
return [ch.strip() for ch in raw if ch.strip()]
if isinstance(raw, str):
@@ -29,6 +36,11 @@
class ChannelManager:
+ """
+ Manage the lifecycle of multiple channels running concurrently.
+ Each channel.startup() runs in its own daemon thread.
+ The web channel is started as default console unless explicitly disabled.
+ """
def __init__(self):
self._channels = {} # channel_name -> channel instance
@@ -39,12 +51,17 @@
@property
def channel(self):
+ """Return the primary (first non-web) channel for backward compatibility."""
return self._primary_channel
def get_channel(self, channel_name: str):
return self._channels.get(channel_name)
def start(self, channel_names: list, first_start: bool = False):
+ """
+ Create and start one or more channels in sub-threads.
+ If first_start is True, plugins and linkai client will also be initialized.
+ """
with self._lock:
channels = []
for name in channel_names:
@@ -99,6 +116,10 @@ logger.exception(e)
def stop(self, channel_name: str = None):
+ """
+ Stop channel(s). If channel_name is given, stop only that channel;
+ otherwise stop all channels.
+ """
# Pop under lock, then stop outside lock to avoid deadlock
with self._lock:
names = [channel_name] if channel_name else list(self._channels.keys())
@@ -136,6 +157,7 @@
@staticmethod
def _interrupt_thread(th: threading.Thread, name: str):
+ """Raise SystemExit in target thread to break blocking loops like start_forever."""
import ctypes
try:
tid = th.ident
@@ -153,6 +175,10 @@ logger.warning(f"[ChannelManager] Thread interrupt error for '{name}': {e}")
def restart(self, new_channel_name: str):
+ """
+ Restart a single channel with a new channel type.
+ Can be called from any thread (e.g. linkai config callback).
+ """
logger.info(f"[ChannelManager] Restarting channel to '{new_channel_name}'...")
self.stop(new_channel_name)
_clear_singleton_cache(new_channel_name)
@@ -161,6 +187,10 @@ logger.info(f"[ChannelManager] Channel restarted to '{new_channel_name}' successfully")
def add_channel(self, channel_name: str):
+ """
+ Dynamically add and start a new channel.
+ If the channel is already running, restart it instead.
+ """
with self._lock:
if channel_name in self._channels:
logger.info(f"[ChannelManager] Channel '{channel_name}' already exists, restarting")
@@ -173,6 +203,9 @@ logger.info(f"[ChannelManager] Channel '{channel_name}' added successfully")
def remove_channel(self, channel_name: str):
+ """
+ Dynamically stop and remove a running channel.
+ """
with self._lock:
if channel_name not in self._channels:
logger.warning(f"[ChannelManager] Channel '{channel_name}' not found, nothing to remove")
@@ -183,6 +216,10 @@
def _clear_singleton_cache(channel_name: str):
+ """
+ Clear the singleton cache for the channel class so that
+ a new instance can be created with updated config.
+ """
cls_map = {
"web": "channel.web.web_channel.WebChannel",
"wechatmp": "channel.wechatmp.wechatmp_channel.WechatMPChannel",
@@ -267,4 +304,4 @@
if __name__ == "__main__":
- run()
+ run()
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/app.py |
Add return value explanations in docstrings |
import os
from datetime import datetime
from typing import Dict, List, Optional
from pathlib import Path
from common.log import logger
class MemoryService:
def __init__(self, workspace_root: str):
self.workspace_root = workspace_root
self.memory_dir = os.path.join(workspace_root, "memory")
# ------------------------------------------------------------------
# list — paginated file metadata
# ------------------------------------------------------------------
def list_files(self, page: int = 1, page_size: int = 20) -> dict:
files: List[dict] = []
# 1. Global memory — MEMORY.md in workspace root
global_path = os.path.join(self.workspace_root, "MEMORY.md")
if os.path.isfile(global_path):
files.append(self._file_info(global_path, "MEMORY.md", "global"))
# 2. Daily memory files — memory/*.md (sorted newest first)
if os.path.isdir(self.memory_dir):
daily_files = []
for name in os.listdir(self.memory_dir):
full = os.path.join(self.memory_dir, name)
if os.path.isfile(full) and name.endswith(".md"):
daily_files.append((name, full))
# Sort by filename descending (newest date first)
daily_files.sort(key=lambda x: x[0], reverse=True)
for name, full in daily_files:
files.append(self._file_info(full, name, "daily"))
total = len(files)
# Paginate
start = (page - 1) * page_size
end = start + page_size
page_items = files[start:end]
return {
"page": page,
"page_size": page_size,
"total": total,
"list": page_items,
}
# ------------------------------------------------------------------
# content — read a single file
# ------------------------------------------------------------------
def get_content(self, filename: str) -> dict:
path = self._resolve_path(filename)
if not os.path.isfile(path):
raise FileNotFoundError(f"Memory file not found: {filename}")
with open(path, "r", encoding="utf-8") as f:
content = f.read()
return {
"filename": filename,
"content": content,
}
# ------------------------------------------------------------------
# dispatch — single entry point for protocol messages
# ------------------------------------------------------------------
def dispatch(self, action: str, payload: Optional[dict] = None) -> dict:
payload = payload or {}
try:
if action == "list":
page = payload.get("page", 1)
page_size = payload.get("page_size", 20)
result_payload = self.list_files(page=page, page_size=page_size)
return {"action": action, "code": 200, "message": "success", "payload": result_payload}
elif action == "content":
filename = payload.get("filename")
if not filename:
return {"action": action, "code": 400, "message": "filename is required", "payload": None}
result_payload = self.get_content(filename)
return {"action": action, "code": 200, "message": "success", "payload": result_payload}
else:
return {"action": action, "code": 400, "message": f"unknown action: {action}", "payload": None}
except FileNotFoundError as e:
return {"action": action, "code": 404, "message": str(e), "payload": None}
except Exception as e:
logger.error(f"[MemoryService] dispatch error: action={action}, error={e}")
return {"action": action, "code": 500, "message": str(e), "payload": None}
# ------------------------------------------------------------------
# internal helpers
# ------------------------------------------------------------------
def _resolve_path(self, filename: str) -> str:
if filename == "MEMORY.md":
return os.path.join(self.workspace_root, filename)
return os.path.join(self.memory_dir, filename)
@staticmethod
def _file_info(path: str, filename: str, file_type: str) -> dict:
stat = os.stat(path)
updated_at = datetime.fromtimestamp(stat.st_mtime).strftime("%Y-%m-%d %H:%M:%S")
return {
"filename": filename,
"type": file_type,
"size": stat.st_size,
"updated_at": updated_at,
} | --- +++ @@ -1,3 +1,13 @@+"""
+Memory service for handling memory query operations via cloud protocol.
+
+Provides a unified interface for listing and reading memory files,
+callable from the cloud client (LinkAI) or a future web console.
+
+Memory file layout (under workspace_root):
+ MEMORY.md -> type: global
+ memory/2026-02-20.md -> type: daily
+"""
import os
from datetime import datetime
@@ -7,8 +17,15 @@
class MemoryService:
+ """
+ High-level service for memory file queries.
+ Operates directly on the filesystem — no MemoryManager dependency.
+ """
def __init__(self, workspace_root: str):
+ """
+ :param workspace_root: Workspace root directory (e.g. ~/cow)
+ """
self.workspace_root = workspace_root
self.memory_dir = os.path.join(workspace_root, "memory")
@@ -16,6 +33,22 @@ # list — paginated file metadata
# ------------------------------------------------------------------
def list_files(self, page: int = 1, page_size: int = 20) -> dict:
+ """
+ List all memory files with metadata (without content).
+
+ Returns::
+
+ {
+ "page": 1,
+ "page_size": 20,
+ "total": 15,
+ "list": [
+ {"filename": "MEMORY.md", "type": "global", "size": 2048, "updated_at": "2026-02-20 10:00:00"},
+ {"filename": "2026-02-20.md", "type": "daily", "size": 512, "updated_at": "2026-02-20 09:30:00"},
+ ...
+ ]
+ }
+ """
files: List[dict] = []
# 1. Global memory — MEMORY.md in workspace root
@@ -53,6 +86,13 @@ # content — read a single file
# ------------------------------------------------------------------
def get_content(self, filename: str) -> dict:
+ """
+ Read the full content of a memory file.
+
+ :param filename: File name, e.g. ``MEMORY.md`` or ``2026-02-20.md``
+ :return: dict with ``filename`` and ``content``
+ :raises FileNotFoundError: if the file does not exist
+ """
path = self._resolve_path(filename)
if not os.path.isfile(path):
raise FileNotFoundError(f"Memory file not found: {filename}")
@@ -69,6 +109,13 @@ # dispatch — single entry point for protocol messages
# ------------------------------------------------------------------
def dispatch(self, action: str, payload: Optional[dict] = None) -> dict:
+ """
+ Dispatch a memory management action.
+
+ :param action: ``list`` or ``content``
+ :param payload: action-specific payload
+ :return: protocol-compatible response dict
+ """
payload = payload or {}
try:
if action == "list":
@@ -97,12 +144,19 @@ # internal helpers
# ------------------------------------------------------------------
def _resolve_path(self, filename: str) -> str:
+ """
+ Resolve a filename to its absolute path.
+
+ - ``MEMORY.md`` → ``{workspace_root}/MEMORY.md``
+ - ``2026-02-20.md`` → ``{workspace_root}/memory/2026-02-20.md``
+ """
if filename == "MEMORY.md":
return os.path.join(self.workspace_root, filename)
return os.path.join(self.memory_dir, filename)
@staticmethod
def _file_info(path: str, filename: str, file_type: str) -> dict:
+ """Build a file metadata dict."""
stat = os.stat(path)
updated_at = datetime.fromtimestamp(stat.st_mtime).strftime("%Y-%m-%d %H:%M:%S")
return {
@@ -110,4 +164,4 @@ "type": file_type,
"size": stat.st_size,
"updated_at": updated_at,
- }
+ }
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/memory/service.py |
Fully document this Python code with docstrings |
import os
import re
from typing import Dict, Any
from pathlib import Path
from agent.tools.base_tool import BaseTool, ToolResult
from common.log import logger
from common.utils import expand_path
# API Key 知识库:常见的环境变量及其描述
API_KEY_REGISTRY = {
# AI 模型服务
"OPENAI_API_KEY": "OpenAI API 密钥 (用于GPT模型、Embedding模型)",
"GEMINI_API_KEY": "Google Gemini API 密钥",
"CLAUDE_API_KEY": "Claude API 密钥 (用于Claude模型)",
"LINKAI_API_KEY": "LinkAI智能体平台 API 密钥,支持多种模型切换",
# 搜索服务
"BOCHA_API_KEY": "博查 AI 搜索 API 密钥 ",
}
class EnvConfig(BaseTool):
name: str = "env_config"
description: str = (
"Manage API keys and skill configurations securely. "
"Use this tool when user wants to configure API keys (like BOCHA_API_KEY, OPENAI_API_KEY), "
"view configured keys, or manage skill settings. "
"Actions: 'set' (add/update key), 'get' (view specific key), 'list' (show all configured keys), 'delete' (remove key). "
"Values are automatically masked for security. Changes take effect immediately via hot reload."
)
params: dict = {
"type": "object",
"properties": {
"action": {
"type": "string",
"description": "Action to perform: 'set', 'get', 'list', 'delete'",
"enum": ["set", "get", "list", "delete"]
},
"key": {
"type": "string",
"description": (
"Environment variable key name. Common keys:\n"
"- OPENAI_API_KEY: OpenAI API (GPT models)\n"
"- OPENAI_API_BASE: OpenAI API base URL\n"
"- CLAUDE_API_KEY: Anthropic Claude API\n"
"- GEMINI_API_KEY: Google Gemini API\n"
"- LINKAI_API_KEY: LinkAI platform\n"
"- BOCHA_API_KEY: Bocha AI search (博查搜索)\n"
"Use exact key names (case-sensitive, all uppercase with underscores)"
)
},
"value": {
"type": "string",
"description": "Value to set for the environment variable (for 'set' action)"
}
},
"required": ["action"]
}
def __init__(self, config: dict = None):
self.config = config or {}
# Store env config in ~/.cow directory (outside workspace for security)
self.env_dir = expand_path("~/.cow")
self.env_path = os.path.join(self.env_dir, '.env')
self.agent_bridge = self.config.get("agent_bridge") # Reference to AgentBridge for hot reload
# Don't create .env file in __init__ to avoid issues during tool discovery
# It will be created on first use in execute()
def _ensure_env_file(self):
# Create ~/.cow directory if it doesn't exist
os.makedirs(self.env_dir, exist_ok=True)
if not os.path.exists(self.env_path):
Path(self.env_path).touch()
logger.info(f"[EnvConfig] Created .env file at {self.env_path}")
def _mask_value(self, value: str) -> str:
if not value or len(value) <= 10:
return "***"
return f"{value[:6]}***{value[-4:]}"
def _read_env_file(self) -> Dict[str, str]:
env_vars = {}
if os.path.exists(self.env_path):
with open(self.env_path, 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
# Skip empty lines and comments
if not line or line.startswith('#'):
continue
# Parse KEY=VALUE
match = re.match(r'^([^=]+)=(.*)$', line)
if match:
key, value = match.groups()
env_vars[key.strip()] = value.strip()
return env_vars
def _write_env_file(self, env_vars: Dict[str, str]):
with open(self.env_path, 'w', encoding='utf-8') as f:
f.write("# Environment variables for agent skills\n")
f.write("# Auto-managed by env_config tool\n\n")
for key, value in sorted(env_vars.items()):
f.write(f"{key}={value}\n")
def _reload_env(self):
env_vars = self._read_env_file()
for key, value in env_vars.items():
os.environ[key] = value
logger.debug(f"[EnvConfig] Reloaded {len(env_vars)} environment variables")
def _refresh_skills(self):
if self.agent_bridge:
try:
# Reload .env file
self._reload_env()
# Refresh skills in all agent instances
refreshed = self.agent_bridge.refresh_all_skills()
logger.info(f"[EnvConfig] Refreshed skills in {refreshed} agent instance(s)")
return True
except Exception as e:
logger.warning(f"[EnvConfig] Failed to refresh skills: {e}")
return False
return False
def execute(self, args: Dict[str, Any]) -> ToolResult:
# Ensure .env file exists on first use
self._ensure_env_file()
action = args.get("action")
key = args.get("key")
value = args.get("value")
try:
if action == "set":
if not key or not value:
return ToolResult.fail("Error: 'key' and 'value' are required for 'set' action.")
# Read current env vars
env_vars = self._read_env_file()
# Update the key
env_vars[key] = value
# Write back to file
self._write_env_file(env_vars)
# Update current process env
os.environ[key] = value
logger.info(f"[EnvConfig] Set {key}={self._mask_value(value)}")
# Try to refresh skills immediately
refreshed = self._refresh_skills()
result = {
"message": f"Successfully set {key}",
"key": key,
"value": self._mask_value(value),
}
if refreshed:
result["note"] = "✅ Skills refreshed automatically - changes are now active"
else:
result["note"] = "⚠️ Skills not refreshed - restart agent to load new skills"
return ToolResult.success(result)
elif action == "get":
if not key:
return ToolResult.fail("Error: 'key' is required for 'get' action.")
# Check in file first, then in current env
env_vars = self._read_env_file()
value = env_vars.get(key) or os.getenv(key)
# Get description from registry
description = API_KEY_REGISTRY.get(key, "未知用途的环境变量")
if value is not None:
logger.info(f"[EnvConfig] Got {key}={self._mask_value(value)}")
return ToolResult.success({
"key": key,
"value": self._mask_value(value),
"description": description,
"exists": True,
"note": f"Value is masked for security. In bash, use ${key} directly — it is auto-injected."
})
else:
return ToolResult.success({
"key": key,
"description": description,
"exists": False,
"message": f"Environment variable '{key}' is not set"
})
elif action == "list":
env_vars = self._read_env_file()
# Build detailed variable list with descriptions
variables_with_info = {}
for key, value in env_vars.items():
variables_with_info[key] = {
"value": self._mask_value(value),
"description": API_KEY_REGISTRY.get(key, "未知用途的环境变量")
}
logger.info(f"[EnvConfig] Listed {len(env_vars)} environment variables")
if not env_vars:
return ToolResult.success({
"message": "No environment variables configured",
"variables": {},
"note": "常用的 API 密钥可以通过 env_config(action='set', key='KEY_NAME', value='your-key') 来配置"
})
return ToolResult.success({
"message": f"Found {len(env_vars)} environment variable(s)",
"variables": variables_with_info
})
elif action == "delete":
if not key:
return ToolResult.fail("Error: 'key' is required for 'delete' action.")
# Read current env vars
env_vars = self._read_env_file()
if key not in env_vars:
return ToolResult.success({
"message": f"Environment variable '{key}' was not set",
"key": key
})
# Remove the key
del env_vars[key]
# Write back to file
self._write_env_file(env_vars)
# Remove from current process env
if key in os.environ:
del os.environ[key]
logger.info(f"[EnvConfig] Deleted {key}")
# Try to refresh skills immediately
refreshed = self._refresh_skills()
result = {
"message": f"Successfully deleted {key}",
"key": key,
}
if refreshed:
result["note"] = "✅ Skills refreshed automatically - changes are now active"
else:
result["note"] = "⚠️ Skills not refreshed - restart agent to apply changes"
return ToolResult.success(result)
else:
return ToolResult.fail(f"Error: Unknown action '{action}'. Use 'set', 'get', 'list', or 'delete'.")
except Exception as e:
logger.error(f"[EnvConfig] Error: {e}", exc_info=True)
return ToolResult.fail(f"EnvConfig tool error: {str(e)}") | --- +++ @@ -1,3 +1,6 @@+"""
+Environment Configuration Tool - Manage API keys and environment variables
+"""
import os
import re
@@ -21,6 +24,7 @@ }
class EnvConfig(BaseTool):
+ """Tool for managing environment variables (API keys, etc.)"""
name: str = "env_config"
description: str = (
@@ -70,6 +74,7 @@ # It will be created on first use in execute()
def _ensure_env_file(self):
+ """Ensure the .env file exists"""
# Create ~/.cow directory if it doesn't exist
os.makedirs(self.env_dir, exist_ok=True)
@@ -78,11 +83,13 @@ logger.info(f"[EnvConfig] Created .env file at {self.env_path}")
def _mask_value(self, value: str) -> str:
+ """Mask sensitive parts of a value for logging"""
if not value or len(value) <= 10:
return "***"
return f"{value[:6]}***{value[-4:]}"
def _read_env_file(self) -> Dict[str, str]:
+ """Read all key-value pairs from .env file"""
env_vars = {}
if os.path.exists(self.env_path):
with open(self.env_path, 'r', encoding='utf-8') as f:
@@ -99,6 +106,7 @@ return env_vars
def _write_env_file(self, env_vars: Dict[str, str]):
+ """Write all key-value pairs to .env file"""
with open(self.env_path, 'w', encoding='utf-8') as f:
f.write("# Environment variables for agent skills\n")
f.write("# Auto-managed by env_config tool\n\n")
@@ -106,12 +114,14 @@ f.write(f"{key}={value}\n")
def _reload_env(self):
+ """Reload environment variables from .env file"""
env_vars = self._read_env_file()
for key, value in env_vars.items():
os.environ[key] = value
logger.debug(f"[EnvConfig] Reloaded {len(env_vars)} environment variables")
def _refresh_skills(self):
+ """Refresh skills after environment variable changes"""
if self.agent_bridge:
try:
# Reload .env file
@@ -127,6 +137,12 @@ return False
def execute(self, args: Dict[str, Any]) -> ToolResult:
+ """
+ Execute environment configuration operation
+
+ :param args: Contains action, key, and value parameters
+ :return: Result of the operation
+ """
# Ensure .env file exists on first use
self._ensure_env_file()
@@ -267,4 +283,4 @@
except Exception as e:
logger.error(f"[EnvConfig] Error: {e}", exc_info=True)
- return ToolResult.fail(f"EnvConfig tool error: {str(e)}")
+ return ToolResult.fail(f"EnvConfig tool error: {str(e)}")
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/tools/env_config/env_config.py |
Write docstrings for algorithm functions |
from typing import Any, Dict, List, Optional
class LLMRequest:
def __init__(self, messages: List[Dict[str, str]] = None, model: Optional[str] = None,
temperature: float = 0.7, max_tokens: Optional[int] = None,
stream: bool = False, tools: Optional[List] = None, **kwargs):
self.messages = messages or []
self.model = model
self.temperature = temperature
self.max_tokens = max_tokens
self.stream = stream
self.tools = tools
# Allow extra attributes
for key, value in kwargs.items():
setattr(self, key, value)
class LLMModel:
def __init__(self, model: str = None, **kwargs):
self.model = model
self.config = kwargs
def call(self, request: LLMRequest):
raise NotImplementedError("LLMModel.call not implemented in this context")
def call_stream(self, request: LLMRequest):
raise NotImplementedError("LLMModel.call_stream not implemented in this context")
class ModelFactory:
@staticmethod
def create_model(model_type: str, **kwargs):
raise NotImplementedError("ModelFactory.create_model not implemented in this context") | --- +++ @@ -1,8 +1,13 @@+"""
+Models module for agent system.
+Provides basic model classes needed by tools and bridge integration.
+"""
from typing import Any, Dict, List, Optional
class LLMRequest:
+ """Request model for LLM operations"""
def __init__(self, messages: List[Dict[str, str]] = None, model: Optional[str] = None,
temperature: float = 0.7, max_tokens: Optional[int] = None,
@@ -19,20 +24,34 @@
class LLMModel:
+ """Base class for LLM models"""
def __init__(self, model: str = None, **kwargs):
self.model = model
self.config = kwargs
def call(self, request: LLMRequest):
+ """
+ Call the model with a request.
+ This is a placeholder implementation.
+ """
raise NotImplementedError("LLMModel.call not implemented in this context")
def call_stream(self, request: LLMRequest):
+ """
+ Call the model with streaming.
+ This is a placeholder implementation.
+ """
raise NotImplementedError("LLMModel.call_stream not implemented in this context")
class ModelFactory:
+ """Factory for creating model instances"""
@staticmethod
def create_model(model_type: str, **kwargs):
+ """
+ Create a model instance based on type.
+ This is a placeholder implementation.
+ """
raise NotImplementedError("ModelFactory.create_model not implemented in this context") | https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/agent/protocol/models.py |
Create Google-style docstrings for my code | # encoding:utf-8
import json
import time
import requests
from models.baidu.baidu_wenxin_session import BaiduWenxinSession
from models.bot import Bot
from models.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common import const
from common.log import logger
from config import conf
# Optional OpenAI image support
try:
from models.openai.open_ai_image import OpenAIImage
_openai_image_available = True
except Exception as e:
logger.warning(f"OpenAI image support not available: {e}")
_openai_image_available = False
OpenAIImage = object # Fallback to object
user_session = dict()
# Claude dialogue model API (available)
class ClaudeAPIBot(Bot, OpenAIImage):
def __init__(self):
super().__init__()
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or "text-davinci-003")
@property
def api_key(self):
return conf().get("claude_api_key")
@property
def api_base(self):
return conf().get("claude_api_base") or "https://api.anthropic.com/v1"
@property
def proxy(self):
return conf().get("proxy", None)
def reply(self, query, context=None):
# acquire reply content
if context and context.type:
if context.type == ContextType.TEXT:
logger.info("[CLAUDE_API] query={}".format(query))
session_id = context["session_id"]
reply = None
if query == "#清除记忆":
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, "记忆已清除")
elif query == "#清除所有":
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
else:
session = self.sessions.session_query(query, session_id)
result = self.reply_text(session)
logger.info(result)
total_tokens, completion_tokens, reply_content = (
result["total_tokens"],
result["completion_tokens"],
result["content"],
)
logger.debug(
"[CLAUDE_API] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(str(session), session_id, reply_content, completion_tokens)
)
if total_tokens == 0:
reply = Reply(ReplyType.ERROR, reply_content)
else:
self.sessions.session_reply(reply_content, session_id, total_tokens)
reply = Reply(ReplyType.TEXT, reply_content)
return reply
elif context.type == ContextType.IMAGE_CREATE:
ok, retstring = self.create_img(query, 0)
reply = None
if ok:
reply = Reply(ReplyType.IMAGE_URL, retstring)
else:
reply = Reply(ReplyType.ERROR, retstring)
return reply
def reply_text(self, session: BaiduWenxinSession, retry_count=0, tools=None):
try:
actual_model = self._model_mapping(conf().get("model"))
# Prepare headers
headers = {
"x-api-key": self.api_key,
"anthropic-version": "2023-06-01",
"content-type": "application/json"
}
# Extract system prompt if present and prepare Claude-compatible messages
system_prompt = conf().get("character_desc", "")
claude_messages = []
for msg in session.messages:
if msg.get("role") == "system":
system_prompt = msg["content"]
else:
claude_messages.append(msg)
# Prepare request data
data = {
"model": actual_model,
"messages": claude_messages,
"max_tokens": self._get_max_tokens(actual_model)
}
if system_prompt:
data["system"] = system_prompt
if tools:
data["tools"] = tools
# Make HTTP request
proxies = {"http": self.proxy, "https": self.proxy} if self.proxy else None
response = requests.post(
f"{self.api_base}/messages",
headers=headers,
json=data,
proxies=proxies
)
if response.status_code != 200:
raise Exception(f"API request failed: {response.status_code} - {response.text}")
claude_response = response.json()
# Handle response content and tool calls
res_content = ""
tool_calls = []
content_blocks = claude_response.get("content", [])
for block in content_blocks:
if block.get("type") == "text":
res_content += block.get("text", "")
elif block.get("type") == "tool_use":
tool_calls.append({
"id": block.get("id", ""),
"name": block.get("name", ""),
"arguments": block.get("input", {})
})
res_content = res_content.strip().replace("<|endoftext|>", "")
usage = claude_response.get("usage", {})
total_tokens = usage.get("input_tokens", 0) + usage.get("output_tokens", 0)
completion_tokens = usage.get("output_tokens", 0)
logger.info("[CLAUDE_API] reply={}".format(res_content))
if tool_calls:
logger.info("[CLAUDE_API] tool_calls={}".format(tool_calls))
result = {
"total_tokens": total_tokens,
"completion_tokens": completion_tokens,
"content": res_content,
}
if tool_calls:
result["tool_calls"] = tool_calls
return result
except Exception as e:
need_retry = retry_count < 2
result = {"total_tokens": 0, "completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
# Handle different types of errors
error_str = str(e).lower()
if "rate" in error_str or "limit" in error_str:
logger.warn("[CLAUDE_API] RateLimitError: {}".format(e))
result["content"] = "提问太快啦,请休息一下再问我吧"
if need_retry:
time.sleep(20)
elif "timeout" in error_str:
logger.warn("[CLAUDE_API] Timeout: {}".format(e))
result["content"] = "我没有收到你的消息"
if need_retry:
time.sleep(5)
elif "connection" in error_str or "network" in error_str:
logger.warn("[CLAUDE_API] APIConnectionError: {}".format(e))
need_retry = False
result["content"] = "我连接不到你的网络"
else:
logger.warn("[CLAUDE_API] Exception: {}".format(e))
need_retry = False
self.sessions.clear_session(session.session_id)
if need_retry:
logger.warn("[CLAUDE_API] 第{}次重试".format(retry_count + 1))
return self.reply_text(session, retry_count + 1, tools)
else:
return result
def _model_mapping(self, model) -> str:
if model == "claude-3-opus":
return const.CLAUDE_3_OPUS
elif model == "claude-3-sonnet":
return const.CLAUDE_3_SONNET
elif model == "claude-3-haiku":
return const.CLAUDE_3_HAIKU
elif model == "claude-3.5-sonnet":
return const.CLAUDE_35_SONNET
return model
def _get_max_tokens(self, model: str) -> int:
if model and (model.startswith("claude-3-5") or model.startswith("claude-3-7")):
return 8192
elif model and model.startswith("claude-3") and "opus" in model:
return 4096
elif model and (model.startswith("claude-sonnet-4") or model.startswith("claude-opus-4")):
return 64000
return 8192
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
actual_model = self._model_mapping(conf().get("model"))
# Extract system prompt from messages if present
system_prompt = kwargs.get("system", conf().get("character_desc", ""))
claude_messages = []
for msg in messages:
if msg.get("role") == "system":
system_prompt = msg["content"]
else:
claude_messages.append(msg)
request_params = {
"model": actual_model,
"max_tokens": kwargs.get("max_tokens", self._get_max_tokens(actual_model)),
"messages": claude_messages,
"stream": stream
}
if system_prompt:
request_params["system"] = system_prompt
if tools:
request_params["tools"] = tools
try:
if stream:
return self._handle_stream_response(request_params)
else:
return self._handle_sync_response(request_params)
except Exception as e:
logger.error(f"Claude API call error: {e}")
if stream:
# Return error generator for stream
def error_generator():
yield {
"error": True,
"message": str(e),
"status_code": 500
}
return error_generator()
else:
# Return error response for sync
return {
"error": True,
"message": str(e),
"status_code": 500
}
def _handle_sync_response(self, request_params):
# Prepare headers
headers = {
"x-api-key": self.api_key,
"anthropic-version": "2023-06-01",
"content-type": "application/json"
}
# Make HTTP request
proxies = {"http": self.proxy, "https": self.proxy} if self.proxy else None
response = requests.post(
f"{self.api_base}/messages",
headers=headers,
json=request_params,
proxies=proxies
)
if response.status_code != 200:
raise Exception(f"API request failed: {response.status_code} - {response.text}")
claude_response = response.json()
# Extract content blocks
text_content = ""
tool_calls = []
content_blocks = claude_response.get("content", [])
for block in content_blocks:
if block.get("type") == "text":
text_content += block.get("text", "")
elif block.get("type") == "tool_use":
tool_calls.append({
"id": block.get("id", ""),
"type": "function",
"function": {
"name": block.get("name", ""),
"arguments": json.dumps(block.get("input", {}))
}
})
# Build message in OpenAI format
message = {
"role": "assistant",
"content": text_content
}
if tool_calls:
message["tool_calls"] = tool_calls
# Format response to match OpenAI structure
usage = claude_response.get("usage", {})
formatted_response = {
"id": claude_response.get("id", ""),
"object": "chat.completion",
"created": int(time.time()),
"model": claude_response.get("model", request_params["model"]),
"choices": [
{
"index": 0,
"message": message,
"finish_reason": claude_response.get("stop_reason", "stop")
}
],
"usage": {
"prompt_tokens": usage.get("input_tokens", 0),
"completion_tokens": usage.get("output_tokens", 0),
"total_tokens": usage.get("input_tokens", 0) + usage.get("output_tokens", 0)
}
}
return formatted_response
def _handle_stream_response(self, request_params):
# Prepare headers
headers = {
"x-api-key": self.api_key,
"anthropic-version": "2023-06-01",
"content-type": "application/json"
}
# Add stream parameter
request_params["stream"] = True
# Track tool use state
tool_uses_map = {} # {index: {id, name, input}}
current_tool_use_index = -1
stop_reason = None # Track stop reason from Claude
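# Anthropic's streaming API emits SSE events (content_block_start, content_block_delta,
# message_delta, message_stop); the loop below translates them into OpenAI-style
# "chat.completion.chunk" dicts, yielding text deltas immediately and accumulating
# tool_use input until the message_delta event, where tool calls are emitted together
# with the stop_reason.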
try:
# Make streaming HTTP request
proxies = {"http": self.proxy, "https": self.proxy} if self.proxy else None
response = requests.post(
f"{self.api_base}/messages",
headers=headers,
json=request_params,
proxies=proxies,
stream=True
)
if response.status_code != 200:
error_text = response.text
try:
error_data = json.loads(error_text)
error_msg = error_data.get("error", {}).get("message", error_text)
except Exception:
error_msg = error_text or "Unknown error"
yield {
"error": True,
"status_code": response.status_code,
"message": error_msg
}
return
# Process streaming response
for line in response.iter_lines():
if line:
line = line.decode('utf-8')
if line.startswith('data: '):
line = line[6:] # Remove 'data: ' prefix
if line == '[DONE]':
break
try:
event = json.loads(line)
event_type = event.get("type")
if event_type == "content_block_start":
# New content block
block = event.get("content_block", {})
if block.get("type") == "tool_use":
current_tool_use_index = event.get("index", 0)
tool_uses_map[current_tool_use_index] = {
"id": block.get("id", ""),
"name": block.get("name", ""),
"input": ""
}
elif event_type == "content_block_delta":
delta = event.get("delta", {})
delta_type = delta.get("type")
if delta_type == "text_delta":
# Text content
content = delta.get("text", "")
yield {
"id": event.get("id", ""),
"object": "chat.completion.chunk",
"created": int(time.time()),
"model": request_params["model"],
"choices": [{
"index": 0,
"delta": {"content": content},
"finish_reason": None
}]
}
elif delta_type == "input_json_delta":
# Tool input accumulation
if current_tool_use_index >= 0:
tool_uses_map[current_tool_use_index]["input"] += delta.get("partial_json", "")
elif event_type == "message_delta":
# Extract stop_reason from delta
delta = event.get("delta", {})
if "stop_reason" in delta:
stop_reason = delta.get("stop_reason")
logger.info(f"[Claude] Stream stop_reason: {stop_reason}")
# Message complete - yield tool calls if any
if tool_uses_map:
for idx in sorted(tool_uses_map.keys()):
tool_data = tool_uses_map[idx]
yield {
"id": event.get("id", ""),
"object": "chat.completion.chunk",
"created": int(time.time()),
"model": request_params["model"],
"choices": [{
"index": 0,
"delta": {
"tool_calls": [{
"index": idx,
"id": tool_data["id"],
"type": "function",
"function": {
"name": tool_data["name"],
"arguments": tool_data["input"]
}
}]
},
"finish_reason": stop_reason
}]
}
elif event_type == "message_stop":
# Final event - log completion
logger.debug(f"[Claude] Stream completed with stop_reason: {stop_reason}")
except json.JSONDecodeError:
continue
except requests.RequestException as e:
logger.error(f"Claude streaming request error: {e}")
yield {
"error": True,
"message": f"Connection error: {str(e)}",
"status_code": 0
}
except Exception as e:
logger.error(f"Claude streaming error: {e}")
yield {
"error": True,
"message": str(e),
"status_code": 500
} | --- +++ @@ -209,6 +209,13 @@ return model
def _get_max_tokens(self, model: str) -> int:
+ """
+ Get max_tokens for the model.
+ Reference from pi-mono:
+ - Claude 3.5/3.7: 8192
+ - Claude 3 Opus: 4096
+ - Default: 8192
+ """
if model and (model.startswith("claude-3-5") or model.startswith("claude-3-7")):
return 8192
elif model and model.startswith("claude-3") and "opus" in model:
@@ -218,6 +225,18 @@ return 8192
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
+ """
+ Call Claude API with tool support for agent integration
+
+ Args:
+ messages: List of messages
+ tools: List of tool definitions
+ stream: Whether to use streaming
+ **kwargs: Additional parameters
+
+ Returns:
+ Formatted response compatible with OpenAI format or generator for streaming
+ """
actual_model = self._model_mapping(conf().get("model"))
# Extract system prompt from messages if present
@@ -269,6 +288,7 @@ }
def _handle_sync_response(self, request_params):
+ """Handle synchronous Claude API response"""
# Prepare headers
headers = {
"x-api-key": self.api_key,
@@ -340,6 +360,7 @@ return formatted_response
def _handle_stream_response(self, request_params):
+ """Handle streaming Claude API response using HTTP requests"""
# Prepare headers
headers = {
"x-api-key": self.api_key,
@@ -481,4 +502,4 @@ "error": True,
"message": str(e),
"status_code": 500
- }
+ }
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/claudeapi/claude_api_bot.py |
Annotate my code with docstrings | # encoding:utf-8
import json
import time
import requests
from models.bot import Bot
from models.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from .doubao_session import DoubaoSession
# Doubao (火山方舟 / Volcengine Ark) API Bot
class DoubaoBot(Bot):
def __init__(self):
super().__init__()
self.sessions = SessionManager(DoubaoSession, model=conf().get("model") or "doubao-seed-2-0-pro-260215")
model = conf().get("model") or "doubao-seed-2-0-pro-260215"
self.args = {
"model": model,
"temperature": conf().get("temperature", 0.8),
"top_p": conf().get("top_p", 1.0),
}
@property
def api_key(self):
return conf().get("ark_api_key")
@property
def base_url(self):
url = conf().get("ark_base_url", "https://ark.cn-beijing.volces.com/api/v3")
if url.endswith("/chat/completions"):
url = url.rsplit("/chat/completions", 1)[0]
return url.rstrip("/")
def reply(self, query, context=None):
# acquire reply content
if context.type == ContextType.TEXT:
logger.info("[DOUBAO] query={}".format(query))
session_id = context["session_id"]
reply = None
clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
if query in clear_memory_commands:
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, "记忆已清除")
elif query == "#清除所有":
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
elif query == "#更新配置":
load_config()
reply = Reply(ReplyType.INFO, "配置已更新")
if reply:
return reply
session = self.sessions.session_query(query, session_id)
logger.debug("[DOUBAO] session query={}".format(session.messages))
model = context.get("doubao_model")
new_args = self.args.copy()
if model:
new_args["model"] = model
reply_content = self.reply_text(session, args=new_args)
logger.debug(
"[DOUBAO] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
session.messages,
session_id,
reply_content["content"],
reply_content["completion_tokens"],
)
)
if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
reply = Reply(ReplyType.ERROR, reply_content["content"])
elif reply_content["completion_tokens"] > 0:
self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
reply = Reply(ReplyType.TEXT, reply_content["content"])
else:
reply = Reply(ReplyType.ERROR, reply_content["content"])
logger.debug("[DOUBAO] reply {} used 0 tokens.".format(reply_content))
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply
def reply_text(self, session: DoubaoSession, args=None, retry_count: int = 0) -> dict:
try:
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + self.api_key
}
body = args.copy()
body["messages"] = session.messages
# Disable thinking by default for better efficiency
body["thinking"] = {"type": "disabled"}
res = requests.post(
f"{self.base_url}/chat/completions",
headers=headers,
json=body
)
if res.status_code == 200:
response = res.json()
return {
"total_tokens": response["usage"]["total_tokens"],
"completion_tokens": response["usage"]["completion_tokens"],
"content": response["choices"][0]["message"]["content"]
}
else:
response = res.json()
error = response.get("error", {})
logger.error(f"[DOUBAO] chat failed, status_code={res.status_code}, "
f"msg={error.get('message')}, type={error.get('type')}")
result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
need_retry = False
if res.status_code >= 500:
logger.warn(f"[DOUBAO] do retry, times={retry_count}")
need_retry = retry_count < 2
elif res.status_code == 401:
result["content"] = "授权失败,请检查API Key是否正确"
elif res.status_code == 429:
result["content"] = "请求过于频繁,请稍后再试"
need_retry = retry_count < 2
else:
need_retry = False
if need_retry:
time.sleep(3)
return self.reply_text(session, args, retry_count + 1)
else:
return result
except Exception as e:
logger.exception(e)
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if need_retry:
return self.reply_text(session, args, retry_count + 1)
else:
return result
# ==================== Agent mode support ====================
def call_with_tools(self, messages, tools=None, stream: bool = False, **kwargs):
try:
# Convert messages from Claude format to OpenAI format
converted_messages = self._convert_messages_to_openai_format(messages)
# Inject system prompt if provided
system_prompt = kwargs.pop("system", None)
if system_prompt:
if not converted_messages or converted_messages[0].get("role") != "system":
converted_messages.insert(0, {"role": "system", "content": system_prompt})
else:
converted_messages[0] = {"role": "system", "content": system_prompt}
# Convert tools from Claude format to OpenAI format
converted_tools = None
if tools:
converted_tools = self._convert_tools_to_openai_format(tools)
# Resolve model / temperature
model = kwargs.pop("model", None) or self.args["model"]
max_tokens = kwargs.pop("max_tokens", None)
# Don't pop temperature, just ignore it - let API use default
kwargs.pop("temperature", None)
# Build request body (omit temperature, let the API use its own default)
request_body = {
"model": model,
"messages": converted_messages,
"stream": stream,
}
if max_tokens is not None:
request_body["max_tokens"] = max_tokens
# Add tools
if converted_tools:
request_body["tools"] = converted_tools
request_body["tool_choice"] = "auto"
# Explicitly disable thinking to avoid reasoning_content issues
# in multi-turn tool calls
request_body["thinking"] = {"type": "disabled"}
logger.debug(f"[DOUBAO] API call: model={model}, "
f"tools={len(converted_tools) if converted_tools else 0}, stream={stream}")
if stream:
return self._handle_stream_response(request_body)
else:
return self._handle_sync_response(request_body)
except Exception as e:
logger.error(f"[DOUBAO] call_with_tools error: {e}")
import traceback
logger.error(traceback.format_exc())
def error_generator():
yield {"error": True, "message": str(e), "status_code": 500}
return error_generator()
# -------------------- streaming --------------------
def _handle_stream_response(self, request_body: dict):
try:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}"
}
url = f"{self.base_url}/chat/completions"
response = requests.post(url, headers=headers, json=request_body, stream=True, timeout=120)
if response.status_code != 200:
error_msg = response.text
logger.error(f"[DOUBAO] API error: status={response.status_code}, msg={error_msg}")
yield {"error": True, "message": error_msg, "status_code": response.status_code}
return
current_tool_calls = {}
finish_reason = None
for line in response.iter_lines():
if not line:
continue
line = line.decode("utf-8")
if not line.startswith("data: "):
continue
data_str = line[6:] # Remove "data: " prefix
if data_str.strip() == "[DONE]":
break
try:
chunk = json.loads(data_str)
except json.JSONDecodeError as e:
logger.warning(f"[DOUBAO] JSON decode error: {e}, data: {data_str[:200]}")
continue
# Check for error in chunk
if chunk.get("error"):
error_data = chunk["error"]
error_msg = error_data.get("message", "Unknown error") if isinstance(error_data, dict) else str(error_data)
logger.error(f"[DOUBAO] stream error: {error_msg}")
yield {"error": True, "message": error_msg, "status_code": 500}
return
if not chunk.get("choices"):
continue
choice = chunk["choices"][0]
delta = choice.get("delta", {})
# Skip reasoning_content (thinking) - don't log or forward
if delta.get("reasoning_content"):
continue
# Handle text content
if "content" in delta and delta["content"]:
yield {
"choices": [{
"index": 0,
"delta": {
"role": "assistant",
"content": delta["content"]
}
}]
}
# Handle tool_calls (streamed incrementally)
if "tool_calls" in delta:
for tool_call_chunk in delta["tool_calls"]:
index = tool_call_chunk.get("index", 0)
if index not in current_tool_calls:
current_tool_calls[index] = {
"id": tool_call_chunk.get("id", ""),
"type": "tool_use",
"name": tool_call_chunk.get("function", {}).get("name", ""),
"input": ""
}
# Accumulate arguments
if "function" in tool_call_chunk and "arguments" in tool_call_chunk["function"]:
current_tool_calls[index]["input"] += tool_call_chunk["function"]["arguments"]
# Yield OpenAI-format tool call delta
yield {
"choices": [{
"index": 0,
"delta": {
"tool_calls": [tool_call_chunk]
}
}]
}
# Capture finish_reason
if choice.get("finish_reason"):
finish_reason = choice["finish_reason"]
# Final chunk with finish_reason
yield {
"choices": [{
"index": 0,
"delta": {},
"finish_reason": finish_reason
}]
}
except requests.exceptions.Timeout:
logger.error("[DOUBAO] Request timeout")
yield {"error": True, "message": "Request timeout", "status_code": 500}
except Exception as e:
logger.error(f"[DOUBAO] stream response error: {e}")
import traceback
logger.error(traceback.format_exc())
yield {"error": True, "message": str(e), "status_code": 500}
# -------------------- sync --------------------
def _handle_sync_response(self, request_body: dict):
try:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}"
}
request_body.pop("stream", None)
url = f"{self.base_url}/chat/completions"
response = requests.post(url, headers=headers, json=request_body, timeout=120)
if response.status_code != 200:
error_msg = response.text
logger.error(f"[DOUBAO] API error: status={response.status_code}, msg={error_msg}")
yield {"error": True, "message": error_msg, "status_code": response.status_code}
return
result = response.json()
message = result["choices"][0]["message"]
finish_reason = result["choices"][0]["finish_reason"]
response_data = {"role": "assistant", "content": []}
# Add text content
if message.get("content"):
response_data["content"].append({
"type": "text",
"text": message["content"]
})
# Add tool calls
if message.get("tool_calls"):
for tool_call in message["tool_calls"]:
response_data["content"].append({
"type": "tool_use",
"id": tool_call["id"],
"name": tool_call["function"]["name"],
"input": json.loads(tool_call["function"]["arguments"])
})
# Map finish_reason
if finish_reason == "tool_calls":
response_data["stop_reason"] = "tool_use"
elif finish_reason == "stop":
response_data["stop_reason"] = "end_turn"
else:
response_data["stop_reason"] = finish_reason
yield response_data
except requests.exceptions.Timeout:
logger.error("[DOUBAO] Request timeout")
yield {"error": True, "message": "Request timeout", "status_code": 500}
except Exception as e:
logger.error(f"[DOUBAO] sync response error: {e}")
import traceback
logger.error(traceback.format_exc())
yield {"error": True, "message": str(e), "status_code": 500}
# -------------------- format conversion --------------------
def _convert_messages_to_openai_format(self, messages):
if not messages:
return []
converted = []
for msg in messages:
role = msg.get("role")
content = msg.get("content")
# Already a simple string - pass through
if isinstance(content, str):
converted.append(msg)
continue
if not isinstance(content, list):
converted.append(msg)
continue
if role == "user":
text_parts = []
tool_results = []
for block in content:
if not isinstance(block, dict):
continue
if block.get("type") == "text":
text_parts.append(block.get("text", ""))
elif block.get("type") == "tool_result":
tool_call_id = block.get("tool_use_id") or ""
result_content = block.get("content", "")
if not isinstance(result_content, str):
result_content = json.dumps(result_content, ensure_ascii=False)
tool_results.append({
"role": "tool",
"tool_call_id": tool_call_id,
"content": result_content
})
# Tool results first (must come right after assistant with tool_calls)
for tr in tool_results:
converted.append(tr)
if text_parts:
converted.append({"role": "user", "content": "\n".join(text_parts)})
elif role == "assistant":
openai_msg = {"role": "assistant"}
text_parts = []
tool_calls = []
for block in content:
if not isinstance(block, dict):
continue
if block.get("type") == "text":
text_parts.append(block.get("text", ""))
elif block.get("type") == "tool_use":
tool_calls.append({
"id": block.get("id"),
"type": "function",
"function": {
"name": block.get("name"),
"arguments": json.dumps(block.get("input", {}))
}
})
if text_parts:
openai_msg["content"] = "\n".join(text_parts)
elif not tool_calls:
openai_msg["content"] = ""
if tool_calls:
openai_msg["tool_calls"] = tool_calls
if not text_parts:
openai_msg["content"] = None
converted.append(openai_msg)
else:
converted.append(msg)
return converted
def _convert_tools_to_openai_format(self, tools):
if not tools:
return None
converted = []
for tool in tools:
# Already in OpenAI format
if "type" in tool and tool["type"] == "function":
converted.append(tool)
else:
converted.append({
"type": "function",
"function": {
"name": tool.get("name"),
"description": tool.get("description"),
"parameters": tool.get("input_schema", {})
}
})
return converted | --- +++ @@ -86,6 +86,13 @@ return reply
def reply_text(self, session: DoubaoSession, args=None, retry_count: int = 0) -> dict:
+ """
+ Call Doubao chat completion API to get the answer
+ :param session: a conversation session
+ :param args: model args
+ :param retry_count: retry count
+ :return: {}
+ """
try:
headers = {
"Content-Type": "application/json",
@@ -143,6 +150,24 @@ # ==================== Agent mode support ====================
def call_with_tools(self, messages, tools=None, stream: bool = False, **kwargs):
+ """
+ Call Doubao API with tool support for agent integration.
+
+ This method handles:
+ 1. Format conversion (Claude format -> OpenAI format)
+ 2. System prompt injection
+ 3. Streaming SSE response with tool_calls
+ 4. Thinking (reasoning) is disabled by default for efficiency
+
+ Args:
+ messages: List of messages (may be in Claude format from agent)
+ tools: List of tool definitions (may be in Claude format from agent)
+ stream: Whether to use streaming
+ **kwargs: Additional parameters (max_tokens, temperature, system, model, etc.)
+
+ Returns:
+ Generator yielding OpenAI-format chunks (for streaming)
+ """
try:
# Convert messages from Claude format to OpenAI format
converted_messages = self._convert_messages_to_openai_format(messages)
@@ -204,6 +229,7 @@ # -------------------- streaming --------------------
def _handle_stream_response(self, request_body: dict):
+ """Handle streaming SSE response from Doubao API and yield OpenAI-format chunks."""
try:
headers = {
"Content-Type": "application/json",
@@ -321,6 +347,7 @@ # -------------------- sync --------------------
def _handle_sync_response(self, request_body: dict):
+ """Handle synchronous API response and yield a single result dict."""
try:
headers = {
"Content-Type": "application/json",
@@ -382,6 +409,12 @@ # -------------------- format conversion --------------------
def _convert_messages_to_openai_format(self, messages):
+ """
+ Convert messages from Claude format to OpenAI format.
+
+ Claude format uses content blocks: tool_use / tool_result / text
+ OpenAI format uses tool_calls in assistant, role=tool for results
+ """
if not messages:
return []
@@ -464,6 +497,12 @@ return converted
def _convert_tools_to_openai_format(self, tools):
+ """
+ Convert tools from Claude format to OpenAI format.
+
+ Claude: {name, description, input_schema}
+ OpenAI: {type: "function", function: {name, description, parameters}}
+ """
if not tools:
return None
@@ -482,4 +521,4 @@ }
})
- return converted
+ return converted
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/doubao/doubao_bot.py |
Write docstrings that follow conventions | # encoding:utf-8
import time
import json
import openai
from models.bot import Bot
from models.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from .modelscope_session import ModelScopeSession
import requests
# ModelScope dialogue model API
class ModelScopeBot(Bot):
def __init__(self):
super().__init__()
self.sessions = SessionManager(ModelScopeSession, model=conf().get("model") or "Qwen/Qwen2.5-7B-Instruct")
model = conf().get("model") or "Qwen/Qwen2.5-7B-Instruct"
if model == "modelscope":
model = "Qwen/Qwen2.5-7B-Instruct"
self.args = {
"model": model, # 对话模型的名称
"temperature": conf().get("temperature", 0.3), # 如果设置,值域须为 [0, 1] 我们推荐 0.3,以达到较合适的效果。
"top_p": conf().get("top_p", 1.0), # 使用默认值
}
@property
def api_key(self):
return conf().get("modelscope_api_key")
@property
def base_url(self):
return conf().get("modelscope_base_url", "https://api-inference.modelscope.cn/v1/chat/completions")
"""
需要获取ModelScope支持API-inference的模型名称列表,请到魔搭社区官网模型中心查看 https://modelscope.cn/models?filter=inference_type&page=1。
或者使用命令 curl https://api-inference.modelscope.cn/v1/models 对模型列表和ID进行获取。查看commend/const.py文件也可以获取模型列表。
获取ModelScope的免费API Key,请到魔搭社区官网用户中心查看获取方式 https://modelscope.cn/docs/model-service/API-Inference/intro。
"""
def reply(self, query, context=None):
# acquire reply content
if context.type == ContextType.TEXT:
logger.info("[MODELSCOPE_AI] query={}".format(query))
session_id = context["session_id"]
reply = None
clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
if query in clear_memory_commands:
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, "记忆已清除")
elif query == "#清除所有":
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
elif query == "#更新配置":
load_config()
reply = Reply(ReplyType.INFO, "配置已更新")
if reply:
return reply
session = self.sessions.session_query(query, session_id)
logger.debug("[MODELSCOPE_AI] session query={}".format(session.messages))
model = context.get("modelscope_model")
new_args = self.args.copy()
if model:
new_args["model"] = model
if new_args["model"] == "Qwen/QwQ-32B":
reply_content = self.reply_text_stream(session, args=new_args)
else:
reply_content = self.reply_text(session, args=new_args)
logger.debug(
"[MODELSCOPE_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
session.messages,
session_id,
reply_content["content"],
reply_content["completion_tokens"],
)
)
if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
# Only mark as error when content is empty and completion_tokens is 0; since the outer condition guarantees non-empty content, this falls through to a TEXT reply
if len(reply_content["content"]) == 0:
reply = Reply(ReplyType.ERROR, reply_content["content"])
else:
reply = Reply(ReplyType.TEXT, reply_content["content"])
elif reply_content["completion_tokens"] > 0:
self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
reply = Reply(ReplyType.TEXT, reply_content["content"])
else:
reply = Reply(ReplyType.ERROR, reply_content["content"])
logger.debug("[MODELSCOPE_AI] reply {} used 0 tokens.".format(reply_content))
return reply
elif context.type == ContextType.IMAGE_CREATE:
ok, retstring = self.create_img(query, 0)
reply = None
if ok:
reply = Reply(ReplyType.IMAGE_URL, retstring)
else:
reply = Reply(ReplyType.ERROR, retstring)
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply
def reply_text(self, session: ModelScopeSession, args=None, retry_count=0) -> dict:
try:
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + self.api_key
}
body = args
body["messages"] = session.messages
res = requests.post(
self.base_url,
headers=headers,
data=json.dumps(body)
)
if res.status_code == 200:
response = res.json()
return {
"total_tokens": response["usage"]["total_tokens"],
"completion_tokens": response["usage"]["completion_tokens"],
"content": response["choices"][0]["message"]["content"]
}
else:
response = res.json()
if "errors" in response:
error = response.get("errors")
elif "error" in response:
error = response.get("error")
else:
error = {"message": "Unknown error", "type": "unknown"}  # use a dict so error.get(...) below is safe
logger.error(f"[MODELSCOPE_AI] chat failed, status_code={res.status_code}, "
f"msg={error.get('message')}, type={error.get('type')}")
result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
need_retry = False
if res.status_code >= 500:
# server error, need retry
logger.warn(f"[MODELSCOPE_AI] do retry, times={retry_count}")
need_retry = retry_count < 2
elif res.status_code == 401:
result["content"] = "授权失败,请检查API Key是否正确"
elif res.status_code == 429:
result["content"] = "请求过于频繁,请稍后再试"
need_retry = retry_count < 2
else:
need_retry = False
if need_retry:
time.sleep(3)
return self.reply_text(session, args, retry_count + 1)
else:
return result
except Exception as e:
logger.exception(e)
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if need_retry:
return self.reply_text(session, args, retry_count + 1)
else:
return result
def reply_text_stream(self, session: ModelScopeSession, args=None, retry_count=0) -> dict:
try:
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + self.api_key
}
body = args
body["messages"] = session.messages
body["stream"] = True # 启用流式响应
res = requests.post(
self.base_url,
headers=headers,
data=json.dumps(body),
stream=True
)
if res.status_code == 200:
content = ""
for line in res.iter_lines():
if line:
decoded_line = line.decode('utf-8')
if decoded_line.startswith("data: "):
try:
json_data = json.loads(decoded_line[6:])
delta_content = json_data.get("choices", [{}])[0].get("delta", {}).get("content", "")
if delta_content:
content += delta_content
except json.JSONDecodeError as e:
pass
return {
"total_tokens": 1, # 流式响应通常不返回token使用情况
"completion_tokens": 1,
"content": content
}
else:
response = res.json()
if "errors" in response:
error = response.get("errors")
elif "error" in response:
error = response.get("error")
else:
error = {"message": "Unknown error", "type": "unknown"}  # use a dict so error.get(...) below is safe
logger.error(f"[MODELSCOPE_AI] chat failed, status_code={res.status_code}, "
f"msg={error.get('message')}, type={error.get('type')}")
result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
need_retry = False
if res.status_code >= 500:
# server error, need retry
logger.warn(f"[MODELSCOPE_AI] do retry, times={retry_count}")
need_retry = retry_count < 2
elif res.status_code == 401:
result["content"] = "授权失败,请检查API Key是否正确"
elif res.status_code == 429:
result["content"] = "请求过于频繁,请稍后再试"
need_retry = retry_count < 2
else:
need_retry = False
if need_retry:
time.sleep(3)
return self.reply_text_stream(session, args, retry_count + 1)
else:
return result
except Exception as e:
logger.exception(e)
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if need_retry:
return self.reply_text_stream(session, args, retry_count + 1)
else:
return result
def create_img(self, query, retry_count=0):
try:
logger.info("[ModelScopeImage] image_query={}".format(query))
headers = {
"Content-Type": "application/json; charset=utf-8", # 明确指定编码
"Authorization": f"Bearer {self.api_key}"
}
payload = {
"prompt": query, # required
"n": 1,
"model": conf().get("text_to_image"),
}
url = "https://api-inference.modelscope.cn/v1/images/generations"
# Serialize manually and keep non-ASCII (Chinese) characters intact by disabling ASCII escaping
json_payload = json.dumps(payload, ensure_ascii=False).encode('utf-8')
# Send the serialized bytes via the data parameter (requests handles the encoding)
res = requests.post(url, headers=headers, data=json_payload)
response_data = res.json()
image_url = response_data['images'][0]['url']
logger.info("[ModelScopeImage] image_url={}".format(image_url))
return True, image_url
except Exception as e:
logger.error(format(e))
return False, "画图出现问题,请休息一下再问我吧" | --- +++ @@ -105,6 +105,13 @@ return reply
def reply_text(self, session: ModelScopeSession, args=None, retry_count=0) -> dict:
+ """
+ call openai's ChatCompletion to get the answer
+ :param session: a conversation session
+ :param session_id: session id
+ :param retry_count: retry count
+ :return: {}
+ """
try:
headers = {
"Content-Type": "application/json",
@@ -166,6 +173,13 @@ return result
def reply_text_stream(self, session: ModelScopeSession, args=None, retry_count=0) -> dict:
+ """
+ call ModelScope's ChatCompletion to get the answer with stream response
+ :param session: a conversation session
+ :param session_id: session id
+ :param retry_count: retry count
+ :return: {}
+ """
try:
headers = {
"Content-Type": "application/json",
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/modelscope/modelscope_bot.py |
Document this code for team use |
from bridge.context import Context
from bridge.reply import Reply
class Bot(object):
def reply(self, query, context: Context = None) -> Reply:
raise NotImplementedError | --- +++ @@ -1,3 +1,6 @@+"""
+Auto-replay chat robot abstract class
+"""
from bridge.context import Context
@@ -6,4 +9,9 @@
class Bot(object):
def reply(self, query, context: Context = None) -> Reply:
- raise NotImplementedError
+ """
+ bot auto-reply content
+ :param req: received message
+ :return: reply content
+ """
+ raise NotImplementedError
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/bot.py |
Add docstrings for production code | from models.session_manager import Session
from common.log import logger
class OpenAISession(Session):
def __init__(self, session_id, system_prompt=None, model="text-davinci-003"):
super().__init__(session_id, system_prompt)
self.model = model
self.reset()
def __str__(self):
# Build the prompt input for the completion model
prompt = ""
for item in self.messages:
if item["role"] == "system":
prompt += item["content"] + "<|endoftext|>\n\n\n"
elif item["role"] == "user":
prompt += "Q: " + item["content"] + "\n"
elif item["role"] == "assistant":
prompt += "\n\nA: " + item["content"] + "<|endoftext|>\n"
if len(self.messages) > 0 and self.messages[-1]["role"] == "user":
prompt += "A: "
return prompt
def discard_exceeding(self, max_tokens, cur_tokens=None):
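# Pops the oldest messages until the rendered prompt fits within max_tokens.
# Token counts come from tiktoken via calc_tokens(); if that raises, the length
# of the prompt string is used as a rough approximation instead.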
precise = True
try:
cur_tokens = self.calc_tokens()
except Exception as e:
precise = False
if cur_tokens is None:
raise e
logger.debug("Exception when counting tokens precisely for query: {}".format(e))
while cur_tokens > max_tokens:
if len(self.messages) > 1:
self.messages.pop(0)
elif len(self.messages) == 1 and self.messages[0]["role"] == "assistant":
self.messages.pop(0)
if precise:
cur_tokens = self.calc_tokens()
else:
cur_tokens = len(str(self))
break
elif len(self.messages) == 1 and self.messages[0]["role"] == "user":
logger.warn("user question exceed max_tokens. total_tokens={}".format(cur_tokens))
break
else:
logger.debug("max_tokens={}, total_tokens={}, len(conversation)={}".format(max_tokens, cur_tokens, len(self.messages)))
break
if precise:
cur_tokens = self.calc_tokens()
else:
cur_tokens = len(str(self))
return cur_tokens
def calc_tokens(self):
return num_tokens_from_string(str(self), self.model)
# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_string(string: str, model: str) -> int:
import tiktoken
encoding = tiktoken.encoding_for_model(model)
num_tokens = len(encoding.encode(string, disallowed_special=()))
return num_tokens | --- +++ @@ -10,6 +10,11 @@
def __str__(self):
# Build the prompt input for the completion model
+ """
+ e.g. Q: xxx
+ A: xxx
+ Q: xxx
+ """
prompt = ""
for item in self.messages:
if item["role"] == "system":
@@ -60,8 +65,9 @@
# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_string(string: str, model: str) -> int:
+ """Returns the number of tokens in a text string."""
import tiktoken
encoding = tiktoken.encoding_for_model(model)
num_tokens = len(encoding.encode(string, disallowed_special=()))
- return num_tokens
+ return num_tokens
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/openai/open_ai_session.py |
Add docstrings to clarify complex logic | # encoding:utf-8
import time
import json
import requests
from models.bot import Bot
from models.minimax.minimax_session import MinimaxSession
from models.session_manager import SessionManager
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from common import const
from agent.protocol.message_utils import drop_orphaned_tool_results_openai
# MiniMax dialogue model API
class MinimaxBot(Bot):
def __init__(self):
super().__init__()
self.args = {
"model": conf().get("model") or "MiniMax-M2.1",
"temperature": conf().get("temperature", 0.3),
"top_p": conf().get("top_p", 0.95),
}
self.sessions = SessionManager(MinimaxSession, model=const.MiniMax)
@property
def api_key(self):
key = conf().get("minimax_api_key")
if not key:
key = conf().get("Minimax_api_key")
return key
@property
def api_base(self):
return conf().get("minimax_api_base", "https://api.minimaxi.com/v1")
def reply(self, query, context: Context = None) -> Reply:
# acquire reply content
logger.info("[MINIMAX] query={}".format(query))
if context.type == ContextType.TEXT:
session_id = context["session_id"]
reply = None
clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
if query in clear_memory_commands:
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, "记忆已清除")
elif query == "#清除所有":
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
elif query == "#更新配置":
load_config()
reply = Reply(ReplyType.INFO, "配置已更新")
if reply:
return reply
session = self.sessions.session_query(query, session_id)
logger.debug("[MINIMAX] session query={}".format(session))
model = context.get("Minimax_model")
new_args = self.args.copy()
if model:
new_args["model"] = model
reply_content = self.reply_text(session, args=new_args)
logger.debug(
"[MINIMAX] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
session.messages,
session_id,
reply_content["content"],
reply_content["completion_tokens"],
)
)
if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
reply = Reply(ReplyType.ERROR, reply_content["content"])
elif reply_content["completion_tokens"] > 0:
self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
reply = Reply(ReplyType.TEXT, reply_content["content"])
else:
reply = Reply(ReplyType.ERROR, reply_content["content"])
logger.debug("[MINIMAX] reply {} used 0 tokens.".format(reply_content))
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply
def reply_text(self, session: MinimaxSession, args=None, retry_count=0) -> dict:
try:
if args is None:
args = self.args
# Build request
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}"
}
request_body = {
"model": args.get("model", self.args["model"]),
"messages": session.messages,
"temperature": args.get("temperature", self.args["temperature"]),
"top_p": args.get("top_p", self.args["top_p"]),
}
url = f"{self.api_base}/chat/completions"
logger.debug(f"[MINIMAX] Calling {url} with model={request_body['model']}")
response = requests.post(url, headers=headers, json=request_body, timeout=60)
if response.status_code == 200:
result = response.json()
content = result["choices"][0]["message"]["content"]
total_tokens = result["usage"]["total_tokens"]
completion_tokens = result["usage"]["completion_tokens"]
logger.debug(f"[MINIMAX] reply_text: content_length={len(content)}, tokens={total_tokens}")
return {
"total_tokens": total_tokens,
"completion_tokens": completion_tokens,
"content": content,
}
else:
error_msg = response.text
logger.error(f"[MINIMAX] API error: status={response.status_code}, msg={error_msg}")
# Parse error for better messages
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
need_retry = False
if response.status_code >= 500:
logger.warning(f"[MINIMAX] Server error, retry={retry_count}")
need_retry = retry_count < 2
elif response.status_code == 401:
result["content"] = "授权失败,请检查API Key是否正确"
need_retry = False
elif response.status_code == 429:
result["content"] = "请求过于频繁,请稍后再试"
need_retry = retry_count < 2
else:
need_retry = False
if need_retry:
time.sleep(3)
return self.reply_text(session, args, retry_count + 1)
else:
return result
except requests.exceptions.Timeout:
logger.error("[MINIMAX] Request timeout")
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "请求超时,请稍后再试"}
if need_retry:
time.sleep(3)
return self.reply_text(session, args, retry_count + 1)
else:
return result
except Exception as e:
logger.error(f"[MINIMAX] reply_text error: {e}")
import traceback
logger.error(traceback.format_exc())
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if need_retry:
time.sleep(3)
return self.reply_text(session, args, retry_count + 1)
else:
return result
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
try:
# Convert messages from Claude format to OpenAI format
converted_messages = self._convert_messages_to_openai_format(messages)
# Extract and inject system prompt if provided
system_prompt = kwargs.pop("system", None)
if system_prompt:
# Add system message at the beginning
converted_messages.insert(0, {"role": "system", "content": system_prompt})
# Convert tools from Claude format to OpenAI format
converted_tools = None
if tools:
converted_tools = self._convert_tools_to_openai_format(tools)
# Prepare API parameters
model = kwargs.pop("model", None) or self.args["model"]
max_tokens = kwargs.pop("max_tokens", 100000)
temperature = kwargs.pop("temperature", self.args["temperature"])
# Build request body
request_body = {
"model": model,
"messages": converted_messages,
"max_tokens": max_tokens,
"temperature": temperature,
"stream": stream,
}
# Add tools if provided
if converted_tools:
request_body["tools"] = converted_tools
# Add reasoning_split=True for better thinking control (M2.1 feature)
# This separates thinking content into reasoning_details field
request_body["reasoning_split"] = True
logger.debug(f"[MINIMAX] API call: model={model}, tools={len(converted_tools) if converted_tools else 0}, stream={stream}")
# Check if we should show thinking process
show_thinking = kwargs.pop("show_thinking", conf().get("minimax_show_thinking", False))
if stream:
return self._handle_stream_response(request_body, show_thinking=show_thinking)
else:
return self._handle_sync_response(request_body)
except Exception as e:
logger.error(f"[MINIMAX] call_with_tools error: {e}")
import traceback
logger.error(traceback.format_exc())
def error_generator():
yield {"error": True, "message": str(e), "status_code": 500}
return error_generator()
def _convert_messages_to_openai_format(self, messages):
converted = []
for msg in messages:
role = msg.get("role")
content = msg.get("content")
if role == "user":
# Handle user message
if isinstance(content, list):
# Extract text from content blocks
text_parts = []
tool_results = []
for block in content:
if isinstance(block, dict):
if block.get("type") == "text":
text_parts.append(block.get("text", ""))
elif block.get("type") == "tool_result":
# Tool result should be a separate message with role="tool"
tool_call_id = block.get("tool_use_id") or ""
if not tool_call_id:
logger.warning(f"[MINIMAX] tool_result missing tool_use_id")
result_content = block.get("content", "")
if not isinstance(result_content, str):
result_content = json.dumps(result_content, ensure_ascii=False)
tool_results.append({
"role": "tool",
"tool_call_id": tool_call_id,
"content": result_content
})
if text_parts:
converted.append({
"role": "user",
"content": "\n".join(text_parts)
})
# Add all tool results (not just the last one)
for tool_result in tool_results:
converted.append(tool_result)
else:
# Simple text content
converted.append({
"role": "user",
"content": str(content)
})
elif role == "assistant":
# Handle assistant message
openai_msg = {"role": "assistant"}
if isinstance(content, list):
# Parse content blocks
text_parts = []
tool_calls = []
for block in content:
if isinstance(block, dict):
if block.get("type") == "text":
text_parts.append(block.get("text", ""))
elif block.get("type") == "tool_use":
# Convert to OpenAI tool_calls format
tool_calls.append({
"id": block.get("id"),
"type": "function",
"function": {
"name": block.get("name"),
"arguments": json.dumps(block.get("input", {}))
}
})
# Set content (can be empty if only tool calls)
if text_parts:
openai_msg["content"] = "\n".join(text_parts)
elif not tool_calls:
openai_msg["content"] = ""
# Set tool_calls
if tool_calls:
openai_msg["tool_calls"] = tool_calls
# When tool_calls exist and content is empty, set to None
if not text_parts:
openai_msg["content"] = None
else:
# Simple text content
openai_msg["content"] = str(content) if content else ""
converted.append(openai_msg)
return drop_orphaned_tool_results_openai(converted)
def _convert_tools_to_openai_format(self, tools):
converted = []
for tool in tools:
converted.append({
"type": "function",
"function": {
"name": tool.get("name"),
"description": tool.get("description"),
"parameters": tool.get("input_schema", {})
}
})
return converted
def _handle_sync_response(self, request_body):
try:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}"
}
# Remove stream from body for sync request
request_body.pop("stream", None)
url = f"{self.api_base}/chat/completions"
response = requests.post(url, headers=headers, json=request_body, timeout=60)
if response.status_code != 200:
error_msg = response.text
logger.error(f"[MINIMAX] API error: status={response.status_code}, msg={error_msg}")
yield {"error": True, "message": error_msg, "status_code": response.status_code}
return
result = response.json()
message = result["choices"][0]["message"]
finish_reason = result["choices"][0]["finish_reason"]
# Build response in Claude-like format
response_data = {
"role": "assistant",
"content": []
}
# Add reasoning_details (thinking) if present
if "reasoning_details" in message:
for reasoning in message["reasoning_details"]:
if "text" in reasoning:
response_data["content"].append({
"type": "thinking",
"thinking": reasoning["text"]
})
# Add text content if present
if message.get("content"):
response_data["content"].append({
"type": "text",
"text": message["content"]
})
# Add tool calls if present
if message.get("tool_calls"):
for tool_call in message["tool_calls"]:
response_data["content"].append({
"type": "tool_use",
"id": tool_call["id"],
"name": tool_call["function"]["name"],
"input": json.loads(tool_call["function"]["arguments"])
})
# Set stop_reason
if finish_reason == "tool_calls":
response_data["stop_reason"] = "tool_use"
elif finish_reason == "stop":
response_data["stop_reason"] = "end_turn"
else:
response_data["stop_reason"] = finish_reason
yield response_data
except requests.exceptions.Timeout:
logger.error("[MINIMAX] Request timeout")
yield {"error": True, "message": "Request timeout", "status_code": 500}
except Exception as e:
logger.error(f"[MINIMAX] sync response error: {e}")
import traceback
logger.error(traceback.format_exc())
yield {"error": True, "message": str(e), "status_code": 500}
def _handle_stream_response(self, request_body, show_thinking=False):
try:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}"
}
url = f"{self.api_base}/chat/completions"
response = requests.post(url, headers=headers, json=request_body, stream=True, timeout=60)
if response.status_code != 200:
error_msg = response.text
logger.error(f"[MINIMAX] API error: status={response.status_code}, msg={error_msg}")
yield {"error": True, "message": error_msg, "status_code": response.status_code}
return
current_content = []
current_tool_calls = {}
current_reasoning = []
finish_reason = None
chunk_count = 0
# Process SSE stream
for line in response.iter_lines():
if not line:
continue
line = line.decode('utf-8')
if not line.startswith('data: '):
continue
data_str = line[6:] # Remove 'data: ' prefix
if data_str.strip() == '[DONE]':
break
try:
chunk = json.loads(data_str)
chunk_count += 1
except json.JSONDecodeError as e:
logger.warning(f"[MINIMAX] JSON decode error: {e}, data: {data_str[:100]}")
continue
# Check for error response (MiniMax format)
if chunk.get("type") == "error" or "error" in chunk:
error_data = chunk.get("error", {})
error_msg = error_data.get("message", "Unknown error")
error_type = error_data.get("type", "")
http_code = error_data.get("http_code", "")
logger.error(f"[MINIMAX] API error: {error_msg} (type: {error_type}, code: {http_code})")
yield {
"error": True,
"message": error_msg,
"status_code": int(http_code) if http_code.isdigit() else 500
}
return
if not chunk.get("choices"):
continue
choice = chunk["choices"][0]
delta = choice.get("delta", {})
# Handle reasoning_details (thinking)
if "reasoning_details" in delta:
for reasoning in delta["reasoning_details"]:
if "text" in reasoning:
reasoning_id = reasoning.get("id", "reasoning-text-1")
reasoning_index = reasoning.get("index", 0)
reasoning_text = reasoning["text"]
# Accumulate reasoning text
if reasoning_index >= len(current_reasoning):
current_reasoning.append({"id": reasoning_id, "text": ""})
current_reasoning[reasoning_index]["text"] += reasoning_text
# Optionally yield thinking as visible content
if show_thinking:
# Yield thinking text as-is (without emoji decoration)
# The reasoning text will be displayed to users
yield {
"choices": [{
"index": 0,
"delta": {
"role": "assistant",
"content": reasoning_text
}
}]
}
# Handle text content
if "content" in delta and delta["content"]:
# Start new content block if needed
if not any(block.get("type") == "text" for block in current_content):
current_content.append({"type": "text", "text": ""})
# Accumulate text
for block in current_content:
if block.get("type") == "text":
block["text"] += delta["content"]
break
# Yield OpenAI-format delta (for agent_stream.py compatibility)
yield {
"choices": [{
"index": 0,
"delta": {
"role": "assistant",
"content": delta["content"]
}
}]
}
# Handle tool calls
if "tool_calls" in delta:
for tool_call_chunk in delta["tool_calls"]:
index = tool_call_chunk.get("index", 0)
if index not in current_tool_calls:
# Start new tool call
current_tool_calls[index] = {
"id": tool_call_chunk.get("id", ""),
"type": "tool_use",
"name": tool_call_chunk.get("function", {}).get("name", ""),
"input": ""
}
# Accumulate tool call arguments
if "function" in tool_call_chunk and "arguments" in tool_call_chunk["function"]:
current_tool_calls[index]["input"] += tool_call_chunk["function"]["arguments"]
# Yield OpenAI-format tool call delta
yield {
"choices": [{
"index": 0,
"delta": {
"tool_calls": [tool_call_chunk]
}
}]
}
# Handle finish_reason
if choice.get("finish_reason"):
finish_reason = choice["finish_reason"]
# Log complete reasoning_details for debugging
if current_reasoning:
logger.debug(f"[MINIMAX] ===== Complete Reasoning Details =====")
for i, reasoning in enumerate(current_reasoning):
reasoning_text = reasoning.get("text", "")
logger.debug(f"[MINIMAX] Reasoning {i+1} (length={len(reasoning_text)}):")
logger.debug(f"[MINIMAX] {reasoning_text}")
logger.debug(f"[MINIMAX] ===== End Reasoning Details =====")
# Yield final chunk with finish_reason (OpenAI format)
yield {
"choices": [{
"index": 0,
"delta": {},
"finish_reason": finish_reason
}]
}
except requests.exceptions.Timeout:
logger.error("[MINIMAX] Request timeout")
yield {"error": True, "message": "Request timeout", "status_code": 500}
except Exception as e:
logger.error(f"[MINIMAX] stream response error: {e}")
import traceback
logger.error(traceback.format_exc())
yield {"error": True, "message": str(e), "status_code": 500} | --- +++ @@ -86,6 +86,13 @@ return reply
def reply_text(self, session: MinimaxSession, args=None, retry_count=0) -> dict:
+ """
+ Call MiniMax API to get the answer using REST API
+ :param session: a conversation session
+ :param args: request arguments
+ :param retry_count: retry count
+ :return: {}
+ """
try:
if args is None:
args = self.args
@@ -169,6 +176,24 @@ return result
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
+ """
+ Call MiniMax API with tool support for agent integration
+
+ This method handles:
+ 1. Format conversion (Claude format → OpenAI format)
+ 2. System prompt injection
+ 3. API calling with REST API
+ 4. Interleaved Thinking support (reasoning_split=True)
+
+ Args:
+ messages: List of messages (may be in Claude format from agent)
+ tools: List of tool definitions (may be in Claude format from agent)
+ stream: Whether to use streaming
+ **kwargs: Additional parameters (max_tokens, temperature, system, etc.)
+
+ Returns:
+ Formatted response or generator for streaming
+ """
try:
# Convert messages from Claude format to OpenAI format
converted_messages = self._convert_messages_to_openai_format(messages)
@@ -226,6 +251,19 @@ return error_generator()
def _convert_messages_to_openai_format(self, messages):
+ """
+ Convert messages from Claude format to OpenAI format
+
+ Claude format:
+ - role: "user" | "assistant"
+ - content: string | list of content blocks
+
+ OpenAI format:
+ - role: "user" | "assistant" | "tool"
+ - content: string
+ - tool_calls: list (for assistant)
+ - tool_call_id: string (for tool results)
+ """
converted = []
for msg in messages:
@@ -319,6 +357,26 @@ return drop_orphaned_tool_results_openai(converted)
def _convert_tools_to_openai_format(self, tools):
+ """
+ Convert tools from Claude format to OpenAI format
+
+ Claude format:
+ {
+ "name": "tool_name",
+ "description": "description",
+ "input_schema": {...}
+ }
+
+ OpenAI format:
+ {
+ "type": "function",
+ "function": {
+ "name": "tool_name",
+ "description": "description",
+ "parameters": {...}
+ }
+ }
+ """
converted = []
for tool in tools:
@@ -334,6 +392,7 @@ return converted
def _handle_sync_response(self, request_body):
+ """Handle synchronous API response"""
try:
headers = {
"Content-Type": "application/json",
@@ -408,6 +467,12 @@ yield {"error": True, "message": str(e), "status_code": 500}
def _handle_stream_response(self, request_body, show_thinking=False):
+ """Handle streaming API response
+
+ Args:
+ request_body: API request parameters
+ show_thinking: Whether to show thinking/reasoning process to users
+ """
try:
headers = {
"Content-Type": "application/json",
@@ -578,4 +643,4 @@ logger.error(f"[MINIMAX] stream response error: {e}")
import traceback
logger.error(traceback.format_exc())
- yield {"error": True, "message": str(e), "status_code": 500}+ yield {"error": True, "message": str(e), "status_code": 500}
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/minimax/minimax_bot.py |
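A minimal, self-contained sketch of the Claude → OpenAI assistant-message conversion that the docstrings in the diff above describe. This is an illustration only, not the repo's helper; the tool name, id, and input below are made up.

import json

def claude_assistant_to_openai(msg):
    # Collect text blocks and tool_use blocks from a Claude-format assistant message.
    text_parts, tool_calls = [], []
    for block in msg.get("content", []):
        if block.get("type") == "text":
            text_parts.append(block.get("text", ""))
        elif block.get("type") == "tool_use":
            tool_calls.append({
                "id": block.get("id"),
                "type": "function",
                "function": {
                    "name": block.get("name"),
                    "arguments": json.dumps(block.get("input", {})),
                },
            })
    # OpenAI format: joined text (or None when only tool calls are present) plus tool_calls.
    converted = {"role": "assistant", "content": "\n".join(text_parts) or None}
    if tool_calls:
        converted["tool_calls"] = tool_calls
    return converted

print(claude_assistant_to_openai({
    "role": "assistant",
    "content": [{"type": "tool_use", "id": "call_1", "name": "get_weather",
                 "input": {"city": "Beijing"}}],
}))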
Generate documentation strings for clarity | #!/usr/bin/env python3
import sys
import os
import zipfile
from pathlib import Path
# Add script directory to path for imports
script_dir = Path(__file__).parent
sys.path.insert(0, str(script_dir))
from quick_validate import validate_skill
def package_skill(skill_path, output_dir=None):
skill_path = Path(skill_path).resolve()
# Validate skill folder exists
if not skill_path.exists():
print(f"❌ Error: Skill folder not found: {skill_path}")
return None
if not skill_path.is_dir():
print(f"❌ Error: Path is not a directory: {skill_path}")
return None
# Validate SKILL.md exists
skill_md = skill_path / "SKILL.md"
if not skill_md.exists():
print(f"❌ Error: SKILL.md not found in {skill_path}")
return None
# Run validation before packaging
print("🔍 Validating skill...")
valid, message = validate_skill(skill_path)
if not valid:
print(f"❌ Validation failed: {message}")
print(" Please fix the validation errors before packaging.")
return None
print(f"✅ {message}\n")
# Determine output location
skill_name = skill_path.name
if output_dir:
output_path = Path(output_dir).resolve()
output_path.mkdir(parents=True, exist_ok=True)
else:
output_path = Path.cwd()
skill_filename = output_path / f"{skill_name}.skill"
# Create the .skill file (zip format)
try:
with zipfile.ZipFile(skill_filename, 'w', zipfile.ZIP_DEFLATED) as zipf:
# Walk through the skill directory
for file_path in skill_path.rglob('*'):
if file_path.is_file():
# Calculate the relative path within the zip
arcname = file_path.relative_to(skill_path.parent)
zipf.write(file_path, arcname)
print(f" Added: {arcname}")
print(f"\n✅ Successfully packaged skill to: {skill_filename}")
return skill_filename
except Exception as e:
print(f"❌ Error creating .skill file: {e}")
return None
def main():
if len(sys.argv) < 2:
print("Usage: python utils/package_skill.py <path/to/skill-folder> [output-directory]")
print("\nExample:")
print(" python utils/package_skill.py skills/public/my-skill")
print(" python utils/package_skill.py skills/public/my-skill ./dist")
sys.exit(1)
skill_path = sys.argv[1]
output_dir = sys.argv[2] if len(sys.argv) > 2 else None
print(f"📦 Packaging skill: {skill_path}")
if output_dir:
print(f" Output directory: {output_dir}")
print()
result = package_skill(skill_path, output_dir)
if result:
sys.exit(0)
else:
sys.exit(1)
if __name__ == "__main__":
main() | --- +++ @@ -1,4 +1,14 @@ #!/usr/bin/env python3
+"""
+Skill Packager - Creates a distributable .skill file of a skill folder
+
+Usage:
+ python utils/package_skill.py <path/to/skill-folder> [output-directory]
+
+Example:
+ python utils/package_skill.py skills/public/my-skill
+ python utils/package_skill.py skills/public/my-skill ./dist
+"""
import sys
import os
@@ -13,6 +23,16 @@
def package_skill(skill_path, output_dir=None):
+ """
+ Package a skill folder into a .skill file.
+
+ Args:
+ skill_path: Path to the skill folder
+ output_dir: Optional output directory for the .skill file (defaults to current directory)
+
+ Returns:
+ Path to the created .skill file, or None if error
+ """
skill_path = Path(skill_path).resolve()
# Validate skill folder exists
@@ -93,4 +113,4 @@
if __name__ == "__main__":
- main()
+ main()
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/skills/skill-creator/scripts/package_skill.py |
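Because package_skill() writes an ordinary ZIP archive with a .skill extension, the result can be inspected with the standard library. A small sketch; the file name is hypothetical.

import zipfile

# List everything inside a packaged skill (.skill files are plain ZIP archives).
with zipfile.ZipFile("my-skill.skill") as zf:  # hypothetical output of package_skill()
    for name in zf.namelist():
        print(name)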
Write docstrings describing functionality | # encoding:utf-8
import json
import openai
from common.log import logger
from agent.protocol.message_utils import drop_orphaned_tool_results_openai
class OpenAICompatibleBot:
def get_api_config(self):
raise NotImplementedError("Subclasses must implement get_api_config()")
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
try:
# Get API configuration from subclass
api_config = self.get_api_config()
# Convert messages from Claude format to OpenAI format
messages = self._convert_messages_to_openai_format(messages)
# Convert tools from Claude format to OpenAI format
if tools:
tools = self._convert_tools_to_openai_format(tools)
# Handle system prompt (OpenAI uses system message, Claude uses separate parameter)
system_prompt = kwargs.get('system')
if system_prompt:
# Add system message at the beginning if not already present
if not messages or messages[0].get('role') != 'system':
messages = [{"role": "system", "content": system_prompt}] + messages
else:
# Replace existing system message
messages[0] = {"role": "system", "content": system_prompt}
# Build request parameters
request_params = {
"model": kwargs.get("model", api_config.get('model', 'gpt-3.5-turbo')),
"messages": messages,
"temperature": kwargs.get("temperature", api_config.get('default_temperature', 0.9)),
"top_p": kwargs.get("top_p", api_config.get('default_top_p', 1.0)),
"frequency_penalty": kwargs.get("frequency_penalty", api_config.get('default_frequency_penalty', 0.0)),
"presence_penalty": kwargs.get("presence_penalty", api_config.get('default_presence_penalty', 0.0)),
"stream": stream
}
# Add max_tokens if specified
if kwargs.get("max_tokens"):
request_params["max_tokens"] = kwargs["max_tokens"]
# Add tools if provided
if tools:
request_params["tools"] = tools
request_params["tool_choice"] = kwargs.get("tool_choice", "auto")
# Make API call with proper configuration
api_key = api_config.get('api_key')
api_base = api_config.get('api_base')
if stream:
return self._handle_stream_response(request_params, api_key, api_base)
else:
return self._handle_sync_response(request_params, api_key, api_base)
except Exception as e:
error_msg = str(e)
logger.error(f"[{self.__class__.__name__}] call_with_tools error: {error_msg}")
if stream:
def error_generator():
yield {
"error": True,
"message": error_msg,
"status_code": 500
}
return error_generator()
else:
return {
"error": True,
"message": error_msg,
"status_code": 500
}
def _handle_sync_response(self, request_params, api_key, api_base):
try:
# Build kwargs with explicit API configuration
kwargs = dict(request_params)
if api_key:
kwargs["api_key"] = api_key
if api_base:
kwargs["api_base"] = api_base
response = openai.ChatCompletion.create(**kwargs)
return response
except Exception as e:
logger.error(f"[{self.__class__.__name__}] sync response error: {e}")
return {
"error": True,
"message": str(e),
"status_code": 500
}
def _handle_stream_response(self, request_params, api_key, api_base):
try:
# Build kwargs with explicit API configuration
kwargs = dict(request_params)
if api_key:
kwargs["api_key"] = api_key
if api_base:
kwargs["api_base"] = api_base
stream = openai.ChatCompletion.create(**kwargs)
# Stream chunks to caller
for chunk in stream:
yield chunk
except Exception as e:
logger.error(f"[{self.__class__.__name__}] stream response error: {e}")
yield {
"error": True,
"message": str(e),
"status_code": 500
}
def _convert_tools_to_openai_format(self, tools):
if not tools:
return None
openai_tools = []
for tool in tools:
# Check if already in OpenAI format
if 'type' in tool and tool['type'] == 'function':
openai_tools.append(tool)
else:
# Convert from Claude format
openai_tools.append({
"type": "function",
"function": {
"name": tool.get("name"),
"description": tool.get("description"),
"parameters": tool.get("input_schema", {})
}
})
return openai_tools
def _convert_messages_to_openai_format(self, messages):
if not messages:
return []
openai_messages = []
for msg in messages:
role = msg.get("role")
content = msg.get("content")
# Handle string content (already in correct format)
if isinstance(content, str):
openai_messages.append(msg)
continue
# Handle list content (Claude format with content blocks)
if isinstance(content, list):
# Check if this is a tool result message (user role with tool_result blocks)
if role == "user" and any(block.get("type") == "tool_result" for block in content):
# Separate text content and tool_result blocks
text_parts = []
tool_results = []
for block in content:
if block.get("type") == "text":
text_parts.append(block.get("text", ""))
elif block.get("type") == "tool_result":
tool_results.append(block)
# First, add tool result messages (must come immediately after assistant with tool_calls)
for block in tool_results:
tool_call_id = block.get("tool_use_id") or ""
if not tool_call_id:
logger.warning(f"[OpenAICompatible] tool_result missing tool_use_id, using empty string")
# Ensure content is a string (some providers require string content)
result_content = block.get("content", "")
if not isinstance(result_content, str):
result_content = json.dumps(result_content, ensure_ascii=False)
openai_messages.append({
"role": "tool",
"tool_call_id": tool_call_id,
"content": result_content
})
# Then, add text content as a separate user message if present
if text_parts:
openai_messages.append({
"role": "user",
"content": " ".join(text_parts)
})
# Check if this is an assistant message with tool_use blocks
elif role == "assistant":
# Separate text content and tool_use blocks
text_parts = []
tool_calls = []
for block in content:
if block.get("type") == "text":
text_parts.append(block.get("text", ""))
elif block.get("type") == "tool_use":
tool_id = block.get("id") or ""
if not tool_id:
logger.warning(f"[OpenAICompatible] tool_use missing id for '{block.get('name')}'")
tool_calls.append({
"id": tool_id,
"type": "function",
"function": {
"name": block.get("name"),
"arguments": json.dumps(block.get("input", {}))
}
})
# Build OpenAI format assistant message
openai_msg = {
"role": "assistant",
"content": " ".join(text_parts) if text_parts else None
}
if tool_calls:
openai_msg["tool_calls"] = tool_calls
if msg.get("_gemini_raw_parts"):
openai_msg["_gemini_raw_parts"] = msg["_gemini_raw_parts"]
openai_messages.append(openai_msg)
else:
# Other list content, keep as is
openai_messages.append(msg)
else:
# Other formats, keep as is
openai_messages.append(msg)
return drop_orphaned_tool_results_openai(openai_messages) | --- +++ @@ -1,5 +1,11 @@ # encoding:utf-8
+"""
+OpenAI-Compatible Bot Base Class
+
+Provides a common implementation for bots that are compatible with OpenAI's API format.
+This includes: OpenAI, LinkAI, Azure OpenAI, and many third-party providers.
+"""
import json
import openai
@@ -8,11 +14,57 @@
class OpenAICompatibleBot:
+ """
+ Base class for OpenAI-compatible bots.
+
+ Provides common tool calling implementation that can be inherited by:
+ - ChatGPTBot
+ - LinkAIBot
+ - OpenAIBot
+ - AzureChatGPTBot
+ - Other OpenAI-compatible providers
+
+ Subclasses only need to override get_api_config() to provide their specific API settings.
+ """
def get_api_config(self):
+ """
+ Get API configuration for this bot.
+
+ Subclasses should override this to provide their specific config.
+
+ Returns:
+ dict: {
+ 'api_key': str,
+ 'api_base': str (optional),
+ 'model': str,
+ 'default_temperature': float,
+ 'default_top_p': float,
+ 'default_frequency_penalty': float,
+ 'default_presence_penalty': float,
+ }
+ """
raise NotImplementedError("Subclasses must implement get_api_config()")
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
+ """
+ Call OpenAI-compatible API with tool support for agent integration
+
+ This method handles:
+ 1. Format conversion (Claude format → OpenAI format)
+ 2. System prompt injection
+ 3. API calling with proper configuration
+ 4. Error handling
+
+ Args:
+ messages: List of messages (may be in Claude format from agent)
+ tools: List of tool definitions (may be in Claude format from agent)
+ stream: Whether to use streaming
+ **kwargs: Additional parameters (max_tokens, temperature, system, etc.)
+
+ Returns:
+ Formatted response in OpenAI format or generator for streaming
+ """
try:
# Get API configuration from subclass
api_config = self.get_api_config()
@@ -82,6 +134,7 @@ }
def _handle_sync_response(self, request_params, api_key, api_base):
+ """Handle synchronous OpenAI API response"""
try:
# Build kwargs with explicit API configuration
kwargs = dict(request_params)
@@ -102,6 +155,7 @@ }
def _handle_stream_response(self, request_params, api_key, api_base):
+ """Handle streaming OpenAI API response"""
try:
# Build kwargs with explicit API configuration
kwargs = dict(request_params)
@@ -125,6 +179,12 @@ }
def _convert_tools_to_openai_format(self, tools):
+ """
+ Convert tools from Claude format to OpenAI format
+
+ Claude format: {name, description, input_schema}
+ OpenAI format: {type: "function", function: {name, description, parameters}}
+ """
if not tools:
return None
@@ -147,6 +207,12 @@ return openai_tools
def _convert_messages_to_openai_format(self, messages):
+ """
+ Convert messages from Claude format to OpenAI format
+
+ Claude uses content blocks with types like 'tool_use', 'tool_result'
+ OpenAI uses 'tool_calls' in assistant messages and 'tool' role for results
+ """
if not messages:
return []
@@ -239,4 +305,4 @@ # Other formats, keep as is
openai_messages.append(msg)
- return drop_orphaned_tool_results_openai(openai_messages)
+ return drop_orphaned_tool_results_openai(openai_messages)
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/openai_compatible_bot.py |
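As the added class docstring notes, a new OpenAI-compatible provider only needs to override get_api_config(); everything else (tool calling, format conversion, streaming) is inherited. A minimal illustrative subclass under that assumption; the key and base URL are placeholders, not real configuration.

from models.openai_compatible_bot import OpenAICompatibleBot

class MyProviderBot(OpenAICompatibleBot):
    # Hypothetical provider: only the API configuration is supplied here;
    # call_with_tools() and the message/tool converters come from the base class.
    def get_api_config(self):
        return {
            "api_key": "sk-placeholder",
            "api_base": "https://api.example.com/v1",
            "model": "gpt-3.5-turbo",
            "default_temperature": 0.9,
            "default_top_p": 1.0,
            "default_frequency_penalty": 0.0,
            "default_presence_penalty": 0.0,
        }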
Help me add docstrings to my project | import plugins
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from plugins import *
from .midjourney import MJBot
from .summary import LinkSummary
from bridge import bridge
from common.expired_dict import ExpiredDict
from common import const
import json
import os
from .utils import Util
from config import plugin_config, conf
@plugins.register(
name="linkai",
desc="A plugin that supports knowledge base and midjourney drawing.",
version="0.1.0",
author="https://link-ai.tech",
desire_priority=99
)
class LinkAI(Plugin):
def __init__(self):
super().__init__()
self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
self.config = super().load_config()
if not self.config:
# 未加载到配置,使用模板中的配置
self.config = self._load_config_template()
if self.config:
self.mj_bot = MJBot(self.config.get("midjourney"), self._fetch_group_app_code)
self.sum_config = {}
if self.config:
self.sum_config = self.config.get("summary")
logger.debug(f"[LinkAI] inited, config={self.config}")
def on_handle_context(self, e_context: EventContext):
if not self.config:
return
context = e_context['context']
if context.type not in [ContextType.TEXT, ContextType.IMAGE, ContextType.IMAGE_CREATE, ContextType.FILE,
ContextType.SHARING]:
# filter content no need solve
return
if context.type in [ContextType.FILE, ContextType.IMAGE] and self._is_summary_open(context):
# 文件处理
context.get("msg").prepare()
file_path = context.content
if not LinkSummary().check_file(file_path, self.sum_config):
return
if context.type != ContextType.IMAGE:
_send_info(e_context, "正在为你加速生成摘要,请稍后")
app_code = self._fetch_app_code(context)
res = LinkSummary().summary_file(file_path, app_code)
if not res:
if context.type != ContextType.IMAGE:
_set_reply_text("因为神秘力量无法获取内容,请稍后再试吧", e_context, level=ReplyType.TEXT)
return
summary_text = res.get("summary")
if context.type != ContextType.IMAGE:
USER_FILE_MAP[_find_user_id(context) + "-sum_id"] = res.get("summary_id")
summary_text += "\n\n💬 发送 \"开启对话\" 可以开启与文件内容的对话"
_set_reply_text(summary_text, e_context, level=ReplyType.TEXT)
os.remove(file_path)
return
if (context.type == ContextType.SHARING and self._is_summary_open(context)) or \
(context.type == ContextType.TEXT and self._is_summary_open(context) and LinkSummary().check_url(context.content)):
if not LinkSummary().check_url(context.content):
return
_send_info(e_context, "正在为你加速生成摘要,请稍后")
app_code = self._fetch_app_code(context)
res = LinkSummary().summary_url(context.content, app_code)
if not res:
_set_reply_text("因为神秘力量无法获取文章内容,请稍后再试吧~", e_context, level=ReplyType.TEXT)
return
_set_reply_text(res.get("summary") + "\n\n💬 发送 \"开启对话\" 可以开启与文章内容的对话", e_context,
level=ReplyType.TEXT)
USER_FILE_MAP[_find_user_id(context) + "-sum_id"] = res.get("summary_id")
return
mj_type = self.mj_bot.judge_mj_task_type(e_context)
if mj_type:
# MJ作图任务处理
self.mj_bot.process_mj_task(mj_type, e_context)
return
if context.content.startswith(f"{_get_trigger_prefix()}linkai"):
# 应用管理功能
self._process_admin_cmd(e_context)
return
if context.type == ContextType.TEXT and context.content == "开启对话" and _find_sum_id(context):
# 文本对话
_send_info(e_context, "正在为你开启对话,请稍后")
res = LinkSummary().summary_chat(_find_sum_id(context))
if not res:
_set_reply_text("开启对话失败,请稍后再试吧", e_context)
return
USER_FILE_MAP[_find_user_id(context) + "-file_id"] = res.get("file_id")
_set_reply_text("💡你可以问我关于这篇文章的任何问题,例如:\n\n" + res.get(
"questions") + "\n\n发送 \"退出对话\" 可以关闭与文章的对话", e_context, level=ReplyType.TEXT)
return
if context.type == ContextType.TEXT and context.content == "退出对话" and _find_file_id(context):
del USER_FILE_MAP[_find_user_id(context) + "-file_id"]
bot = bridge.Bridge().find_chat_bot(const.LINKAI)
bot.sessions.clear_session(context["session_id"])
_set_reply_text("对话已退出", e_context, level=ReplyType.TEXT)
return
if context.type == ContextType.TEXT and _find_file_id(context):
bot = bridge.Bridge().find_chat_bot(const.LINKAI)
context.kwargs["file_id"] = _find_file_id(context)
reply = bot.reply(context.content, context)
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
return
if self._is_chat_task(e_context):
# 文本对话任务处理
self._process_chat_task(e_context)
# 插件管理功能
def _process_admin_cmd(self, e_context: EventContext):
context = e_context['context']
cmd = context.content.split()
if len(cmd) == 1 or (len(cmd) == 2 and cmd[1] == "help"):
_set_reply_text(self.get_help_text(verbose=True), e_context, level=ReplyType.INFO)
return
if len(cmd) == 2 and (cmd[1] == "open" or cmd[1] == "close"):
# 知识库开关指令
if not Util.is_admin(e_context):
_set_reply_text("需要管理员权限执行", e_context, level=ReplyType.ERROR)
return
is_open = True
tips_text = "开启"
if cmd[1] == "close":
tips_text = "关闭"
is_open = False
conf()["use_linkai"] = is_open
bridge.Bridge().reset_bot()
_set_reply_text(f"LinkAI对话功能{tips_text}", e_context, level=ReplyType.INFO)
return
if len(cmd) == 3 and cmd[1] == "app":
# 知识库应用切换指令
if not context.kwargs.get("isgroup"):
_set_reply_text("该指令需在群聊中使用", e_context, level=ReplyType.ERROR)
return
if not Util.is_admin(e_context):
_set_reply_text("需要管理员权限执行", e_context, level=ReplyType.ERROR)
return
app_code = cmd[2]
group_name = context.kwargs.get("msg").from_user_nickname
group_mapping = self.config.get("group_app_map")
if group_mapping:
group_mapping[group_name] = app_code
else:
self.config["group_app_map"] = {group_name: app_code}
# 保存插件配置
super().save_config(self.config)
_set_reply_text(f"应用设置成功: {app_code}", e_context, level=ReplyType.INFO)
return
if len(cmd) == 3 and cmd[1] == "sum" and (cmd[2] == "open" or cmd[2] == "close"):
# 总结对话开关指令
if not Util.is_admin(e_context):
_set_reply_text("需要管理员权限执行", e_context, level=ReplyType.ERROR)
return
is_open = True
tips_text = "开启"
if cmd[2] == "close":
tips_text = "关闭"
is_open = False
if not self.sum_config:
_set_reply_text(
f"插件未启用summary功能,请参考以下链添加插件配置\n\nhttps://github.com/zhayujie/chatgpt-on-wechat/blob/master/plugins/linkai/README.md",
e_context, level=ReplyType.INFO)
else:
self.sum_config["enabled"] = is_open
_set_reply_text(f"文章总结功能{tips_text}", e_context, level=ReplyType.INFO)
return
_set_reply_text(f"指令错误,请输入{_get_trigger_prefix()}linkai help 获取帮助", e_context,
level=ReplyType.INFO)
return
def _is_summary_open(self, context) -> bool:
# 获取远程应用插件状态
remote_enabled = False
if context.kwargs.get("isgroup"):
# 群聊场景只查询群对应的app_code
group_name = context.get("msg").from_user_nickname
app_code = self._fetch_group_app_code(group_name)
if app_code:
if context.type.name in ["FILE", "SHARING"]:
remote_enabled = Util.fetch_app_plugin(app_code, "内容总结")
else:
# 非群聊场景使用全局app_code
app_code = conf().get("linkai_app_code")
if app_code:
if context.type.name in ["FILE", "SHARING"]:
remote_enabled = Util.fetch_app_plugin(app_code, "内容总结")
# 基础条件:总开关开启且消息类型符合要求
base_enabled = (
self.sum_config
and self.sum_config.get("enabled")
and (context.type.name in (
self.sum_config.get("type") or ["FILE", "SHARING"]) or context.type.name == "TEXT")
)
# 群聊:需要满足(总开关和群开关)或远程插件开启
if context.kwargs.get("isgroup"):
return (base_enabled and self.sum_config.get("group_enabled")) or remote_enabled
# 非群聊:只需要满足总开关或远程插件开启
return base_enabled or remote_enabled
# LinkAI 对话任务处理
def _is_chat_task(self, e_context: EventContext):
context = e_context['context']
# 群聊应用管理
return self.config.get("group_app_map") and context.kwargs.get("isgroup")
def _process_chat_task(self, e_context: EventContext):
context = e_context['context']
# 群聊应用管理
group_name = context.get("msg").from_user_nickname
app_code = self._fetch_group_app_code(group_name)
if app_code:
context.kwargs['app_code'] = app_code
def _fetch_group_app_code(self, group_name: str) -> str:
group_mapping = self.config.get("group_app_map")
if group_mapping:
app_code = group_mapping.get(group_name) or group_mapping.get("ALL_GROUP")
return app_code
def _fetch_app_code(self, context) -> str:
app_code = conf().get("linkai_app_code")
if context.kwargs.get("isgroup"):
# 群聊场景只查询群对应的app_code
group_name = context.get("msg").from_user_nickname
app_code = self._fetch_group_app_code(group_name)
return app_code
def get_help_text(self, verbose=False, **kwargs):
trigger_prefix = _get_trigger_prefix()
help_text = "用于集成 LinkAI 提供的知识库、Midjourney绘画、文档总结、联网搜索等能力。\n\n"
if not verbose:
return help_text
help_text += f'📖 知识库\n - 群聊中指定应用: {trigger_prefix}linkai app 应用编码\n'
help_text += f' - {trigger_prefix}linkai open: 开启对话\n'
help_text += f' - {trigger_prefix}linkai close: 关闭对话\n'
help_text += f'\n例如: \n"{trigger_prefix}linkai app Kv2fXJcH"\n\n'
help_text += f"🎨 绘画\n - 生成: {trigger_prefix}mj 描述词1, 描述词2.. \n - 放大: {trigger_prefix}mju 图片ID 图片序号\n - 变换: {trigger_prefix}mjv 图片ID 图片序号\n - 重置: {trigger_prefix}mjr 图片ID"
help_text += f"\n\n例如:\n\"{trigger_prefix}mj a little cat, white --ar 9:16\"\n\"{trigger_prefix}mju 11055927171882 2\""
help_text += f"\n\"{trigger_prefix}mjv 11055927171882 2\"\n\"{trigger_prefix}mjr 11055927171882\""
help_text += f"\n\n💡 文档总结和对话\n - 开启: {trigger_prefix}linkai sum open\n - 使用: 发送文件、公众号文章等可生成摘要,并与内容对话"
return help_text
def _load_config_template(self):
logger.debug("No LinkAI plugin config.json, use plugins/linkai/config.json.template")
try:
plugin_config_path = os.path.join(self.path, "config.json.template")
if os.path.exists(plugin_config_path):
with open(plugin_config_path, "r", encoding="utf-8") as f:
plugin_conf = json.load(f)
plugin_conf["midjourney"]["enabled"] = False
plugin_conf["summary"]["enabled"] = False
write_plugin_config({"linkai": plugin_conf})
return plugin_conf
except Exception as e:
logger.exception(e)
def reload(self):
self.config = super().load_config()
def _send_info(e_context: EventContext, content: str):
reply = Reply(ReplyType.TEXT, content)
channel = e_context["channel"]
channel.send(reply, e_context["context"])
def _find_user_id(context):
if context["isgroup"]:
return context.kwargs.get("msg").actual_user_id
else:
return context["receiver"]
def _set_reply_text(content: str, e_context: EventContext, level: ReplyType = ReplyType.ERROR):
reply = Reply(level, content)
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
def _get_trigger_prefix():
return conf().get("plugin_trigger_prefix", "$")
def _find_sum_id(context):
return USER_FILE_MAP.get(_find_user_id(context) + "-sum_id")
def _find_file_id(context):
user_id = _find_user_id(context)
if user_id:
return USER_FILE_MAP.get(user_id + "-file_id")
USER_FILE_MAP = ExpiredDict(conf().get("expires_in_seconds") or 60 * 30) | --- +++ @@ -35,6 +35,10 @@ logger.debug(f"[LinkAI] inited, config={self.config}")
def on_handle_context(self, e_context: EventContext):
+ """
+ 消息处理逻辑
+ :param e_context: 消息上下文
+ """
if not self.config:
return
@@ -228,6 +232,10 @@ return self.config.get("group_app_map") and context.kwargs.get("isgroup")
def _process_chat_task(self, e_context: EventContext):
+ """
+ 处理LinkAI对话任务
+ :param e_context: 对话上下文
+ """
context = e_context['context']
# 群聊应用管理
group_name = context.get("msg").from_user_nickname
@@ -236,12 +244,22 @@ context.kwargs['app_code'] = app_code
def _fetch_group_app_code(self, group_name: str) -> str:
+ """
+ 根据群聊名称获取对应的应用code
+ :param group_name: 群聊名称
+ :return: 应用code
+ """
group_mapping = self.config.get("group_app_map")
if group_mapping:
app_code = group_mapping.get(group_name) or group_mapping.get("ALL_GROUP")
return app_code
def _fetch_app_code(self, context) -> str:
+ """
+ 根据主配置或者群聊名称获取对应的应用code,优先获取群聊配置的应用code
+ :param context: 上下文
+ :return: 应用code
+ """
app_code = conf().get("linkai_app_code")
if context.kwargs.get("isgroup"):
# 群聊场景只查询群对应的app_code
@@ -315,4 +333,4 @@ return USER_FILE_MAP.get(user_id + "-file_id")
-USER_FILE_MAP = ExpiredDict(conf().get("expires_in_seconds") or 60 * 30)
+USER_FILE_MAP = ExpiredDict(conf().get("expires_in_seconds") or 60 * 30)
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/plugins/linkai/linkai.py |
Document all public functions with docstrings | # coding=utf-8
import http.client
import json
import time
import requests
import datetime
import hashlib
import hmac
import base64
import urllib.parse
import uuid
from common.log import logger
from common.tmp_dir import TmpDir
def text_to_speech_aliyun(url, text, appkey, token):
headers = {
"Content-Type": "application/json",
}
data = {
"text": text,
"appkey": appkey,
"token": token,
"format": "wav"
}
response = requests.post(url, headers=headers, data=json.dumps(data))
if response.status_code == 200 and response.headers['Content-Type'] == 'audio/mpeg':
output_file = TmpDir().path() + "reply-" + str(int(time.time())) + "-" + str(hash(text) & 0x7FFFFFFF) + ".wav"
with open(output_file, 'wb') as file:
file.write(response.content)
logger.debug(f"音频文件保存成功,文件名:{output_file}")
else:
logger.debug("响应状态码: {}".format(response.status_code))
logger.debug("响应内容: {}".format(response.text))
output_file = None
return output_file
def speech_to_text_aliyun(url, audioContent, appkey, token):
format = 'pcm'
sample_rate = 16000
enablePunctuationPrediction = True
enableInverseTextNormalization = True
enableVoiceDetection = False
# 设置RESTful请求参数
request = url + '?appkey=' + appkey
request = request + '&format=' + format
request = request + '&sample_rate=' + str(sample_rate)
if enablePunctuationPrediction :
request = request + '&enable_punctuation_prediction=' + 'true'
if enableInverseTextNormalization :
request = request + '&enable_inverse_text_normalization=' + 'true'
if enableVoiceDetection :
request = request + '&enable_voice_detection=' + 'true'
host = 'nls-gateway-cn-shanghai.aliyuncs.com'
# 设置HTTPS请求头部
httpHeaders = {
'X-NLS-Token': token,
'Content-type': 'application/octet-stream',
'Content-Length': len(audioContent)
}
conn = http.client.HTTPSConnection(host)
conn.request(method='POST', url=request, body=audioContent, headers=httpHeaders)
response = conn.getresponse()
body = response.read()
try:
body = json.loads(body)
status = body['status']
if status == 20000000 :
result = body['result']
if result :
logger.info(f"阿里云语音识别到了:{result}")
conn.close()
return result
else :
logger.error(f"语音识别失败,状态码: {status}")
except ValueError:
logger.error(f"语音识别失败,收到非JSON格式的数据: {body}")
conn.close()
return None
class AliyunTokenGenerator:
def __init__(self, access_key_id, access_key_secret):
self.access_key_id = access_key_id
self.access_key_secret = access_key_secret
def sign_request(self, parameters):
# 将参数按照字典顺序排序
sorted_params = sorted(parameters.items())
# 构造待签名的查询字符串
canonicalized_query_string = ''
for (k, v) in sorted_params:
canonicalized_query_string += '&' + self.percent_encode(k) + '=' + self.percent_encode(v)
# 构造用于签名的字符串
string_to_sign = 'GET&%2F&' + self.percent_encode(canonicalized_query_string[1:]) # 使用GET方法
# 使用HMAC算法计算签名
h = hmac.new((self.access_key_secret + "&").encode('utf-8'), string_to_sign.encode('utf-8'), hashlib.sha1)
signature = base64.encodebytes(h.digest()).strip()
return signature
def percent_encode(self, encode_str):
encode_str = str(encode_str)
res = urllib.parse.quote(encode_str, '')
res = res.replace('+', '%20')
res = res.replace('*', '%2A')
res = res.replace('%7E', '~')
return res
def get_token(self):
# 设置请求参数
params = {
'Format': 'JSON',
'Version': '2019-02-28',
'AccessKeyId': self.access_key_id,
'SignatureMethod': 'HMAC-SHA1',
'Timestamp': datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"),
'SignatureVersion': '1.0',
'SignatureNonce': str(uuid.uuid4()), # 使用uuid生成唯一的随机数
'Action': 'CreateToken',
'RegionId': 'cn-shanghai'
}
# 计算签名
signature = self.sign_request(params)
params['Signature'] = signature
# 构造请求URL
url = 'http://nls-meta.cn-shanghai.aliyuncs.com/?' + urllib.parse.urlencode(params)
# 发送请求
response = requests.get(url)
return response.text | --- +++ @@ -1,4 +1,12 @@ # coding=utf-8
+"""
+Author: chazzjimel
+Email: chazzjimel@gmail.com
+wechat:cheung-z-x
+
+Description:
+
+"""
import http.client
import json
@@ -16,6 +24,18 @@
def text_to_speech_aliyun(url, text, appkey, token):
+ """
+ 使用阿里云的文本转语音服务将文本转换为语音。
+
+ 参数:
+ - url (str): 阿里云文本转语音服务的端点URL。
+ - text (str): 要转换为语音的文本。
+ - appkey (str): 您的阿里云appkey。
+ - token (str): 阿里云API的认证令牌。
+
+ 返回值:
+ - str: 成功时输出音频文件的路径,否则为None。
+ """
headers = {
"Content-Type": "application/json",
}
@@ -43,6 +63,18 @@ return output_file
def speech_to_text_aliyun(url, audioContent, appkey, token):
+ """
+ 使用阿里云的语音识别服务识别音频文件中的语音。
+
+ 参数:
+ - url (str): 阿里云语音识别服务的端点URL。
+ - audioContent (byte): pcm音频数据。
+ - appkey (str): 您的阿里云appkey。
+ - token (str): 阿里云API的认证令牌。
+
+ 返回值:
+ - str: 成功时输出识别到的文本,否则为None。
+ """
format = 'pcm'
sample_rate = 16000
enablePunctuationPrediction = True
@@ -95,12 +127,28 @@
class AliyunTokenGenerator:
+ """
+ 用于生成阿里云服务认证令牌的类。
+
+ 属性:
+ - access_key_id (str): 您的阿里云访问密钥ID。
+ - access_key_secret (str): 您的阿里云访问密钥秘密。
+ """
def __init__(self, access_key_id, access_key_secret):
self.access_key_id = access_key_id
self.access_key_secret = access_key_secret
def sign_request(self, parameters):
+ """
+ 为阿里云服务签名请求。
+
+ 参数:
+ - parameters (dict): 请求的参数字典。
+
+ 返回值:
+ - str: 请求的签名签章。
+ """
# 将参数按照字典顺序排序
sorted_params = sorted(parameters.items())
@@ -119,6 +167,15 @@ return signature
def percent_encode(self, encode_str):
+ """
+ 对字符串进行百分比编码。
+
+ 参数:
+ - encode_str (str): 要编码的字符串。
+
+ 返回值:
+ - str: 编码后的字符串。
+ """
encode_str = str(encode_str)
res = urllib.parse.quote(encode_str, '')
res = res.replace('+', '%20')
@@ -127,6 +184,12 @@ return res
def get_token(self):
+ """
+ 获取阿里云服务的令牌。
+
+ 返回值:
+ - str: 获取到的令牌。
+ """
# 设置请求参数
params = {
'Format': 'JSON',
@@ -150,4 +213,4 @@ # 发送请求
response = requests.get(url)
- return response.text
+ return response.text
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/voice/ali/ali_api.py |
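A hedged usage sketch that combines the two pieces above. The access keys and appkey are placeholders, parsing Token.Id assumes the standard CreateToken response shape, and the TTS endpoint path is an assumption (only the gateway host appears in the code above).

import json
from voice.ali.ali_api import AliyunTokenGenerator, text_to_speech_aliyun

generator = AliyunTokenGenerator("your-access-key-id", "your-access-key-secret")
token_body = json.loads(generator.get_token())  # get_token() returns the raw response text
token = token_body["Token"]["Id"]               # assumed CreateToken response shape

tts_url = "https://nls-gateway-cn-shanghai.aliyuncs.com/stream/v1/tts"  # assumed endpoint path
audio_path = text_to_speech_aliyun(tts_url, "你好,世界", "your-appkey", token)
print(audio_path)  # path to the saved wav file, or None on failure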
Auto-generate documentation strings for this file | # encoding:utf-8
import json
from models.bot import Bot
from models.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from .dashscope_session import DashscopeSession
import os
import dashscope
from dashscope import MultiModalConversation
from http import HTTPStatus
# Legacy model name mapping for older dashscope SDK constants.
# New models don't need to be added here — they use their name string directly.
dashscope_models = {
"qwen-turbo": dashscope.Generation.Models.qwen_turbo,
"qwen-plus": dashscope.Generation.Models.qwen_plus,
"qwen-max": dashscope.Generation.Models.qwen_max,
"qwen-bailian-v1": dashscope.Generation.Models.bailian_v1,
}
# Model name prefixes that require MultiModalConversation API instead of Generation API.
# Qwen3.5+ series are omni models that only support MultiModalConversation.
MULTIMODAL_MODEL_PREFIXES = ("qwen3.5-",)
# Qwen对话模型API
class DashscopeBot(Bot):
def __init__(self):
super().__init__()
self.sessions = SessionManager(DashscopeSession, model=conf().get("model") or "qwen-plus")
self.model_name = conf().get("model") or "qwen-plus"
self.client = dashscope.Generation
api_key = conf().get("dashscope_api_key")
if api_key:
os.environ["DASHSCOPE_API_KEY"] = api_key
@property
def api_key(self):
return conf().get("dashscope_api_key")
@staticmethod
def _is_multimodal_model(model_name: str) -> bool:
return model_name.startswith(MULTIMODAL_MODEL_PREFIXES)
def reply(self, query, context=None):
# acquire reply content
if context.type == ContextType.TEXT:
logger.info("[DASHSCOPE] query={}".format(query))
session_id = context["session_id"]
reply = None
clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
if query in clear_memory_commands:
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, "记忆已清除")
elif query == "#清除所有":
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
elif query == "#更新配置":
load_config()
reply = Reply(ReplyType.INFO, "配置已更新")
if reply:
return reply
session = self.sessions.session_query(query, session_id)
logger.debug("[DASHSCOPE] session query={}".format(session.messages))
reply_content = self.reply_text(session)
logger.debug(
"[DASHSCOPE] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
session.messages,
session_id,
reply_content["content"],
reply_content["completion_tokens"],
)
)
if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
reply = Reply(ReplyType.ERROR, reply_content["content"])
elif reply_content["completion_tokens"] > 0:
self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
reply = Reply(ReplyType.TEXT, reply_content["content"])
else:
reply = Reply(ReplyType.ERROR, reply_content["content"])
logger.debug("[DASHSCOPE] reply {} used 0 tokens.".format(reply_content))
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply
def reply_text(self, session: DashscopeSession, retry_count=0) -> dict:
try:
dashscope.api_key = self.api_key
model = dashscope_models.get(self.model_name, self.model_name)
if self._is_multimodal_model(self.model_name):
mm_messages = self._prepare_messages_for_multimodal(session.messages)
response = MultiModalConversation.call(
model=model,
messages=mm_messages,
result_format="message"
)
else:
response = self.client.call(
model,
messages=session.messages,
result_format="message"
)
if response.status_code == HTTPStatus.OK:
resp_dict = self._response_to_dict(response)
choice = resp_dict["output"]["choices"][0]
content = choice.get("message", {}).get("content", "")
# Multimodal models may return content as a list of blocks
if isinstance(content, list):
content = "".join(
item.get("text", "") for item in content if isinstance(item, dict)
)
usage = resp_dict.get("usage", {})
return {
"total_tokens": usage.get("total_tokens", 0),
"completion_tokens": usage.get("output_tokens", 0),
"content": content,
}
else:
logger.error('Request id: %s, Status code: %s, error code: %s, error message: %s' % (
response.request_id, response.status_code,
response.code, response.message
))
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if need_retry:
return self.reply_text(session, retry_count + 1)
else:
return result
except Exception as e:
logger.exception(e)
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if need_retry:
return self.reply_text(session, retry_count + 1)
else:
return result
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
try:
# Convert messages from Claude format to DashScope format
messages = self._convert_messages_to_dashscope_format(messages)
# Convert tools from Claude format to DashScope format
if tools:
tools = self._convert_tools_to_dashscope_format(tools)
# Handle system prompt
system_prompt = kwargs.get('system')
if system_prompt:
# Add system message at the beginning if not already present
if not messages or messages[0].get('role') != 'system':
messages = [{"role": "system", "content": system_prompt}] + messages
else:
# Replace existing system message
messages[0] = {"role": "system", "content": system_prompt}
# Build request parameters
model_name = kwargs.get("model", self.model_name)
parameters = {
"result_format": "message", # Required for tool calling
"temperature": kwargs.get("temperature", conf().get("temperature", 0.85)),
"top_p": kwargs.get("top_p", conf().get("top_p", 0.8)),
}
# Add max_tokens if specified
if kwargs.get("max_tokens"):
parameters["max_tokens"] = kwargs["max_tokens"]
# Add tools if provided
if tools:
parameters["tools"] = tools
# Add tool_choice if specified
if kwargs.get("tool_choice"):
parameters["tool_choice"] = kwargs["tool_choice"]
# Add thinking parameters for Qwen3 models (disabled by default for stability)
if "qwen3" in model_name.lower() or "qwq" in model_name.lower():
# Only enable thinking mode if explicitly requested
enable_thinking = kwargs.get("enable_thinking", False)
if enable_thinking:
parameters["enable_thinking"] = True
# Set thinking budget if specified
if kwargs.get("thinking_budget"):
parameters["thinking_budget"] = kwargs["thinking_budget"]
# Qwen3 requires incremental_output=true in thinking mode
if stream:
parameters["incremental_output"] = True
# Always use incremental_output for streaming (for better token-by-token streaming)
# This is especially important for tool calling to avoid incomplete responses
if stream:
parameters["incremental_output"] = True
# Make API call with DashScope SDK
if stream:
return self._handle_stream_response(model_name, messages, parameters)
else:
return self._handle_sync_response(model_name, messages, parameters)
except Exception as e:
error_msg = str(e)
logger.error(f"[DASHSCOPE] call_with_tools error: {error_msg}")
if stream:
def error_generator():
yield {
"error": True,
"message": error_msg,
"status_code": 500
}
return error_generator()
else:
return {
"error": True,
"message": error_msg,
"status_code": 500
}
def _handle_sync_response(self, model_name, messages, parameters):
try:
# Set API key before calling
dashscope.api_key = self.api_key
model = dashscope_models.get(model_name, model_name)
if self._is_multimodal_model(model_name):
messages = self._prepare_messages_for_multimodal(messages)
response = MultiModalConversation.call(
model=model,
messages=messages,
**parameters
)
else:
response = dashscope.Generation.call(
model=model,
messages=messages,
**parameters
)
if response.status_code == HTTPStatus.OK:
# Convert response to dict to avoid DashScope object KeyError issues
resp_dict = self._response_to_dict(response)
choice = resp_dict["output"]["choices"][0]
message = choice.get("message", {})
content = message.get("content", "")
# Multimodal models may return content as a list of blocks
if isinstance(content, list):
content = "".join(
item.get("text", "") for item in content if isinstance(item, dict)
)
usage = resp_dict.get("usage", {})
return {
"id": resp_dict.get("request_id"),
"object": "chat.completion",
"created": 0,
"model": model_name,
"choices": [{
"index": 0,
"message": {
"role": message.get("role", "assistant"),
"content": content,
"tool_calls": self._convert_tool_calls_to_openai_format(
message.get("tool_calls")
)
},
"finish_reason": choice.get("finish_reason")
}],
"usage": {
"prompt_tokens": usage.get("input_tokens", 0),
"completion_tokens": usage.get("output_tokens", 0),
"total_tokens": usage.get("total_tokens", 0)
}
}
else:
logger.error(f"[DASHSCOPE] API error: {response.code} - {response.message}")
return {
"error": True,
"message": response.message,
"status_code": response.status_code
}
except Exception as e:
logger.error(f"[DASHSCOPE] sync response error: {e}")
return {
"error": True,
"message": str(e),
"status_code": 500
}
def _handle_stream_response(self, model_name, messages, parameters):
try:
# Set API key before calling
dashscope.api_key = self.api_key
model = dashscope_models.get(model_name, model_name)
if self._is_multimodal_model(model_name):
messages = self._prepare_messages_for_multimodal(messages)
responses = MultiModalConversation.call(
model=model,
messages=messages,
stream=True,
**parameters
)
else:
responses = dashscope.Generation.call(
model=model,
messages=messages,
stream=True,
**parameters
)
# Stream chunks to caller, converting to OpenAI format
for response in responses:
# Convert to dict first to avoid DashScope proxy object KeyError
resp_dict = self._response_to_dict(response)
status_code = resp_dict.get("status_code", 200)
if status_code != HTTPStatus.OK:
err_code = resp_dict.get("code", "")
err_msg = resp_dict.get("message", "Unknown error")
logger.error(f"[DASHSCOPE] Stream error: {err_code} - {err_msg}")
yield {
"error": True,
"message": err_msg,
"status_code": status_code
}
continue
choices = resp_dict.get("output", {}).get("choices", [])
if not choices:
continue
choice = choices[0]
finish_reason = choice.get("finish_reason")
message = choice.get("message", {})
# Convert to OpenAI-compatible format
openai_chunk = {
"id": resp_dict.get("request_id"),
"object": "chat.completion.chunk",
"created": 0,
"model": model_name,
"choices": [{
"index": 0,
"delta": {},
"finish_reason": finish_reason
}]
}
# Add role
role = message.get("role")
if role:
openai_chunk["choices"][0]["delta"]["role"] = role
# Add reasoning_content (thinking process from models like qwen3.5)
reasoning_content = message.get("reasoning_content")
if reasoning_content:
openai_chunk["choices"][0]["delta"]["reasoning_content"] = reasoning_content
# Add content (multimodal models may return list of blocks)
content = message.get("content")
if isinstance(content, list):
content = "".join(
item.get("text", "") for item in content if isinstance(item, dict)
)
if content:
openai_chunk["choices"][0]["delta"]["content"] = content
# Add tool_calls
tool_calls = message.get("tool_calls")
if tool_calls:
openai_chunk["choices"][0]["delta"]["tool_calls"] = self._convert_tool_calls_to_openai_format(tool_calls)
yield openai_chunk
except Exception as e:
logger.error(f"[DASHSCOPE] stream response error: {e}", exc_info=True)
yield {
"error": True,
"message": str(e),
"status_code": 500
}
@staticmethod
def _response_to_dict(response) -> dict:
_SENTINEL = object()
def _safe_getattr(obj, name, default=_SENTINEL):
try:
return getattr(obj, name)
except (AttributeError, KeyError, TypeError):
return default
def _has_attr(obj, name):
return _safe_getattr(obj, name) is not _SENTINEL
def _to_dict(obj):
if isinstance(obj, (str, int, float, bool, type(None))):
return obj
if isinstance(obj, dict):
return {k: _to_dict(v) for k, v in obj.items()}
if isinstance(obj, (list, tuple)):
return [_to_dict(i) for i in obj]
# DashScope response objects behave like dicts (have .keys())
if _has_attr(obj, "keys"):
try:
return {k: _to_dict(obj[k]) for k in obj.keys()}
except Exception:
pass
return obj
result = {}
# Extract known top-level fields safely
for attr in ("request_id", "status_code", "code", "message", "output", "usage"):
val = _safe_getattr(response, attr)
if val is _SENTINEL:
try:
val = response[attr]
except (KeyError, TypeError, IndexError):
continue
result[attr] = _to_dict(val)
return result
def _convert_tools_to_dashscope_format(self, tools):
if not tools:
return None
dashscope_tools = []
for tool in tools:
# Check if already in DashScope/OpenAI format
if 'type' in tool and tool['type'] == 'function':
dashscope_tools.append(tool)
else:
# Convert from Claude format
dashscope_tools.append({
"type": "function",
"function": {
"name": tool.get("name"),
"description": tool.get("description"),
"parameters": tool.get("input_schema", {})
}
})
return dashscope_tools
@staticmethod
def _prepare_messages_for_multimodal(messages: list) -> list:
result = []
for msg in messages:
msg = dict(msg) # shallow copy
# Normalize content to list format [{"text": "..."}]
content = msg.get("content")
if content is None or (isinstance(content, str) and content == ""):
msg["content"] = [{"text": ""}]
elif isinstance(content, str):
msg["content"] = [{"text": content}]
# If content is already a list, keep as-is (already in multimodal format)
result.append(msg)
return result
def _convert_messages_to_dashscope_format(self, messages):
if not messages:
return []
dashscope_messages = []
for msg in messages:
role = msg.get("role")
content = msg.get("content")
# Handle string content (already in correct format)
if isinstance(content, str):
dashscope_messages.append(msg)
continue
# Handle list content (Claude format with content blocks)
if isinstance(content, list):
# Check if this is a tool result message (user role with tool_result blocks)
if role == "user" and any(block.get("type") == "tool_result" for block in content):
# Convert each tool_result block to a separate tool message
for block in content:
if block.get("type") == "tool_result":
dashscope_messages.append({
"role": "tool",
"content": block.get("content", ""),
"tool_call_id": block.get("tool_use_id") # DashScope uses 'tool_call_id'
})
# Check if this is an assistant message with tool_use blocks
elif role == "assistant":
# Separate text content and tool_use blocks
text_parts = []
tool_calls = []
for block in content:
if block.get("type") == "text":
text_parts.append(block.get("text", ""))
elif block.get("type") == "tool_use":
tool_calls.append({
"id": block.get("id"),
"type": "function",
"function": {
"name": block.get("name"),
"arguments": json.dumps(block.get("input", {}))
}
})
# Build DashScope format assistant message
dashscope_msg = {
"role": "assistant"
}
# Add content only if there is actual text
# DashScope API: when tool_calls exist, content should be None or omitted if empty
if text_parts:
dashscope_msg["content"] = " ".join(text_parts)
elif not tool_calls:
# If no tool_calls and no text, set empty string (rare case)
dashscope_msg["content"] = ""
# If there are tool_calls but no text, don't set content field at all
if tool_calls:
dashscope_msg["tool_calls"] = tool_calls
dashscope_messages.append(dashscope_msg)
else:
# Other list content, keep as is
dashscope_messages.append(msg)
else:
# Other formats, keep as is
dashscope_messages.append(msg)
return dashscope_messages
def _convert_tool_calls_to_openai_format(self, tool_calls):
if not tool_calls:
return None
openai_tool_calls = []
for tool_call in tool_calls:
# DashScope format is already similar to OpenAI
if isinstance(tool_call, dict):
openai_tool_calls.append(tool_call)
else:
# Handle object format
openai_tool_calls.append({
"id": getattr(tool_call, 'id', None),
"type": "function",
"function": {
"name": tool_call.function.name,
"arguments": tool_call.function.arguments
}
})
return openai_tool_calls | --- +++ @@ -46,6 +46,7 @@
@staticmethod
def _is_multimodal_model(model_name: str) -> bool:
+ """Check if the model requires MultiModalConversation API"""
return model_name.startswith(MULTIMODAL_MODEL_PREFIXES)
def reply(self, query, context=None):
@@ -93,6 +94,13 @@ return reply
def reply_text(self, session: DashscopeSession, retry_count=0) -> dict:
+ """
+ call openai's ChatCompletion to get the answer
+ :param session: a conversation session
+ :param session_id: session id
+ :param retry_count: retry count
+ :return: {}
+ """
try:
dashscope.api_key = self.api_key
model = dashscope_models.get(self.model_name, self.model_name)
@@ -146,6 +154,24 @@ return result
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
+ """
+ Call DashScope API with tool support for agent integration
+
+ This method handles:
+ 1. Format conversion (Claude format → DashScope format)
+ 2. System prompt injection
+ 3. API calling with DashScope SDK
+ 4. Thinking mode support (enable_thinking for Qwen3)
+
+ Args:
+ messages: List of messages (may be in Claude format from agent)
+ tools: List of tool definitions (may be in Claude format from agent)
+ stream: Whether to use streaming
+ **kwargs: Additional parameters (max_tokens, temperature, system, etc.)
+
+ Returns:
+ Formatted response or generator for streaming
+ """
try:
# Convert messages from Claude format to DashScope format
messages = self._convert_messages_to_dashscope_format(messages)
@@ -229,6 +255,7 @@ }
def _handle_sync_response(self, model_name, messages, parameters):
+ """Handle synchronous DashScope API response"""
try:
# Set API key before calling
dashscope.api_key = self.api_key
@@ -299,6 +326,7 @@ }
def _handle_stream_response(self, model_name, messages, parameters):
+ """Handle streaming DashScope API response"""
try:
# Set API key before calling
dashscope.api_key = self.api_key
@@ -394,9 +422,18 @@
@staticmethod
def _response_to_dict(response) -> dict:
+ """
+ Convert DashScope response object to a plain dict.
+
+ DashScope SDK wraps responses in proxy objects whose __getattr__
+ delegates to __getitem__, raising KeyError (not AttributeError)
+ when an attribute is missing. Standard hasattr / getattr only
+ catch AttributeError, so we must use try-except everywhere.
+ """
_SENTINEL = object()
def _safe_getattr(obj, name, default=_SENTINEL):
+ """getattr that also catches KeyError from DashScope proxy objects."""
try:
return getattr(obj, name)
except (AttributeError, KeyError, TypeError):
@@ -433,6 +470,12 @@ return result
def _convert_tools_to_dashscope_format(self, tools):
+ """
+ Convert tools from Claude format to DashScope format
+
+ Claude format: {name, description, input_schema}
+ DashScope format: {type: "function", function: {name, description, parameters}}
+ """
if not tools:
return None
@@ -456,6 +499,20 @@
@staticmethod
def _prepare_messages_for_multimodal(messages: list) -> list:
+ """
+ Ensure messages are compatible with MultiModalConversation API.
+
+ MultiModalConversation._preprocess_messages iterates every message
+ with ``content = message["content"]; for elem in content: ...``,
+ which means:
+ 1. Every message MUST have a 'content' key.
+ 2. 'content' MUST be an iterable (list), not a plain string.
+ The expected format is [{"text": "..."}, ...].
+
+ Meanwhile the DashScope API requires role='tool' messages to follow
+ assistant tool_calls, so we must NOT convert them to role='user'.
+ We just ensure they have a list-typed 'content'.
+ """
result = []
for msg in messages:
msg = dict(msg) # shallow copy
@@ -472,6 +529,12 @@ return result
def _convert_messages_to_dashscope_format(self, messages):
+ """
+ Convert messages from Claude format to DashScope format
+
+ Claude uses content blocks with types like 'tool_use', 'tool_result'
+ DashScope uses 'tool_calls' in assistant messages and 'tool' role for results
+ """
if not messages:
return []
@@ -546,6 +609,7 @@ return dashscope_messages
def _convert_tool_calls_to_openai_format(self, tool_calls):
+ """Convert DashScope tool_calls to OpenAI format"""
if not tool_calls:
return None
@@ -565,4 +629,4 @@ }
})
- return openai_tool_calls+ return openai_tool_calls
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/dashscope/dashscope_bot.py |
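The row above describes the Claude-to-OpenAI/DashScope tool mapping only inside docstrings; a minimal standalone sketch of that mapping (helper names are illustrative, not taken from the repo) could look like this:
import json

def claude_tool_to_function_format(tool: dict) -> dict:
    # Map a Claude-style tool ({name, description, input_schema})
    # to the OpenAI/DashScope function-calling shape (illustrative sketch).
    return {
        "type": "function",
        "function": {
            "name": tool.get("name"),
            "description": tool.get("description", ""),
            "parameters": tool.get("input_schema", {}),
        },
    }

def tool_use_block_to_tool_call(block: dict) -> dict:
    # Map a Claude 'tool_use' content block to an OpenAI-style tool_call entry.
    return {
        "id": block.get("id"),
        "type": "function",
        "function": {
            "name": block.get("name"),
            "arguments": json.dumps(block.get("input", {}), ensure_ascii=False),
        },
    }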
Add docstrings to improve collaboration | # access LinkAI knowledge base platform
# docs: https://link-ai.tech/platform/link-app/wechat
import re
import time
import requests
import json
import config
from models.bot import Bot
from models.openai_compatible_bot import OpenAICompatibleBot
from models.chatgpt.chat_gpt_session import ChatGPTSession
from models.session_manager import SessionManager
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, pconf
import threading
from common import memory, utils
import base64
import os
class LinkAIBot(Bot, OpenAICompatibleBot):
# authentication failed
AUTH_FAILED_CODE = 401
NO_QUOTA_CODE = 406
def __init__(self):
super().__init__()
self.sessions = LinkAISessionManager(LinkAISession, model=conf().get("model") or "gpt-3.5-turbo")
self.args = {}
def get_api_config(self):
return {
'api_key': conf().get("open_ai_api_key"), # LinkAI uses OpenAI-compatible key
'api_base': conf().get("open_ai_api_base", "https://api.link-ai.tech/v1"),
'model': conf().get("model", "gpt-3.5-turbo"),
'default_temperature': conf().get("temperature", 0.9),
'default_top_p': conf().get("top_p", 1.0),
'default_frequency_penalty': conf().get("frequency_penalty", 0.0),
'default_presence_penalty': conf().get("presence_penalty", 0.0),
}
def reply(self, query, context: Context = None) -> Reply:
if context.type == ContextType.TEXT:
return self._chat(query, context)
elif context.type == ContextType.IMAGE_CREATE:
if not conf().get("text_to_image"):
logger.warn("[LinkAI] text_to_image is not enabled, ignore the IMAGE_CREATE request")
return Reply(ReplyType.TEXT, "")
ok, res = self.create_img(query, 0)
if ok:
reply = Reply(ReplyType.IMAGE_URL, res)
else:
reply = Reply(ReplyType.ERROR, res)
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply
def _chat(self, query, context, retry_count=0) -> Reply:
if retry_count > 2:
# exit from retry 2 times
logger.warn("[LINKAI] failed after maximum number of retry times")
return Reply(ReplyType.TEXT, "请再问我一次吧")
try:
# load config
if context.get("generate_breaked_by"):
logger.info(f"[LINKAI] won't set appcode because a plugin ({context['generate_breaked_by']}) affected the context")
app_code = None
else:
plugin_app_code = self._find_group_mapping_code(context)
app_code = context.kwargs.get("app_code") or plugin_app_code or conf().get("linkai_app_code")
linkai_api_key = conf().get("linkai_api_key")
session_id = context["session_id"]
session_message = self.sessions.session_msg_query(query, session_id)
logger.debug(f"[LinkAI] session={session_message}, session_id={session_id}")
# image process
img_cache = memory.USER_IMAGE_CACHE.get(session_id)
if img_cache:
messages = self._process_image_msg(app_code=app_code, session_id=session_id, query=query, img_cache=img_cache)
if messages:
session_message = messages
model = conf().get("model")
# remove system message
if session_message[0].get("role") == "system":
if app_code or model == "wenxin":
session_message.pop(0)
body = {
"app_code": app_code,
"messages": session_message,
"model": model, # 对话模型的名称, 支持 gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei
"temperature": conf().get("temperature"),
"top_p": conf().get("top_p", 1),
"frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"presence_penalty": conf().get("presence_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"session_id": session_id,
"sender_id": session_id,
"channel_type": context.get("channel_type") or conf().get("channel_type", "web")
}
try:
from linkai import LinkAIClient
client_id = LinkAIClient.fetch_client_id()
if client_id:
body["client_id"] = client_id
# start: client info deliver
if context.kwargs.get("msg"):
body["session_id"] = context.kwargs.get("msg").from_user_id
if context.kwargs.get("msg").is_group:
body["is_group"] = True
body["group_name"] = context.kwargs.get("msg").from_user_nickname
body["sender_name"] = context.kwargs.get("msg").actual_user_nickname
else:
if body.get("channel_type") in ["wechatcom_app"]:
body["sender_name"] = context.kwargs.get("msg").from_user_id
else:
body["sender_name"] = context.kwargs.get("msg").from_user_nickname
except Exception as e:
pass
file_id = context.kwargs.get("file_id")
if file_id:
body["file_id"] = file_id
logger.info(f"[LINKAI] query={query}, app_code={app_code}, model={body.get('model')}, file_id={file_id}")
headers = {"Authorization": "Bearer " + linkai_api_key}
# do http request
base_url = conf().get("linkai_api_base", "https://api.link-ai.tech")
res = requests.post(url=base_url + "/v1/chat/completions", json=body, headers=headers,
timeout=conf().get("request_timeout", 180))
if res.status_code == 200:
# execute success
response = res.json()
reply_content = response["choices"][0]["message"]["content"]
total_tokens = response["usage"]["total_tokens"]
res_code = response.get('code')
logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}, res_code={res_code}")
if res_code == 429:
logger.warn(f"[LINKAI] 用户访问超出限流配置,sender_id={body.get('sender_id')}")
else:
self.sessions.session_reply(reply_content, session_id, total_tokens, query=query)
agent_suffix = self._fetch_agent_suffix(response)
if agent_suffix:
reply_content += agent_suffix
if not agent_suffix:
knowledge_suffix = self._fetch_knowledge_search_suffix(response)
if knowledge_suffix:
reply_content += knowledge_suffix
# image process
if response["choices"][0].get("img_urls"):
thread = threading.Thread(target=self._send_image, args=(context.get("channel"), context, response["choices"][0].get("img_urls")))
thread.start()
reply_content = response["choices"][0].get("text_content")
if reply_content:
reply_content = self._process_url(reply_content)
return Reply(ReplyType.TEXT, reply_content)
else:
response = res.json()
error = response.get("error")
logger.error(f"[LINKAI] chat failed, status_code={res.status_code}, "
f"msg={error.get('message')}, type={error.get('type')}")
if res.status_code >= 500:
# server error, need retry
time.sleep(2)
logger.warn(f"[LINKAI] do retry, times={retry_count}")
return self._chat(query, context, retry_count + 1)
error_reply = "提问太快啦,请休息一下再问我吧"
if res.status_code == 409:
error_reply = "这个问题我还没有学会,请问我其它问题吧"
return Reply(ReplyType.TEXT, error_reply)
except Exception as e:
logger.exception(e)
# retry
time.sleep(2)
logger.warn(f"[LINKAI] do retry, times={retry_count}")
return self._chat(query, context, retry_count + 1)
def _process_image_msg(self, app_code: str, session_id: str, query:str, img_cache: dict):
try:
enable_image_input = False
app_info = self._fetch_app_info(app_code)
if not app_info:
logger.debug(f"[LinkAI] not found app, can't process images, app_code={app_code}")
return None
plugins = app_info.get("data").get("plugins")
for plugin in plugins:
if plugin.get("input_type") and "IMAGE" in plugin.get("input_type"):
enable_image_input = True
if not enable_image_input:
return
msg = img_cache.get("msg")
path = img_cache.get("path")
msg.prepare()
logger.info(f"[LinkAI] query with images, path={path}")
messages = self._build_vision_msg(query, path)
memory.USER_IMAGE_CACHE[session_id] = None
return messages
except Exception as e:
logger.exception(e)
def _find_group_mapping_code(self, context):
try:
if context.kwargs.get("isgroup"):
group_name = context.kwargs.get("msg").from_user_nickname
if config.plugin_config and config.plugin_config.get("linkai"):
linkai_config = config.plugin_config.get("linkai")
group_mapping = linkai_config.get("group_app_map")
if group_mapping and group_name:
return group_mapping.get(group_name)
except Exception as e:
logger.exception(e)
return None
def _build_vision_msg(self, query: str, path: str):
try:
suffix = utils.get_path_suffix(path)
with open(path, "rb") as file:
base64_str = base64.b64encode(file.read()).decode('utf-8')
messages = [{
"role": "user",
"content": [
{
"type": "text",
"text": query
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/{suffix};base64,{base64_str}"
}
}
]
}]
return messages
except Exception as e:
logger.exception(e)
def reply_text(self, session: ChatGPTSession, app_code="", retry_count=0) -> dict:
if retry_count >= 2:
# exit from retry 2 times
logger.warn("[LINKAI] failed after maximum number of retry times")
return {
"total_tokens": 0,
"completion_tokens": 0,
"content": "请再问我一次吧"
}
try:
body = {
"app_code": app_code,
"messages": session.messages,
"model": conf().get("model") or "gpt-3.5-turbo", # 对话模型的名称, 支持 gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin, xunfei
"temperature": conf().get("temperature"),
"top_p": conf().get("top_p", 1),
"frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"presence_penalty": conf().get("presence_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
}
if self.args.get("max_tokens"):
body["max_tokens"] = self.args.get("max_tokens")
headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
# do http request
base_url = conf().get("linkai_api_base", "https://api.link-ai.tech")
res = requests.post(url=base_url + "/v1/chat/completions", json=body, headers=headers,
timeout=conf().get("request_timeout", 180))
if res.status_code == 200:
# execute success
response = res.json()
reply_content = response["choices"][0]["message"]["content"]
total_tokens = response["usage"]["total_tokens"]
logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}")
return {
"total_tokens": total_tokens,
"completion_tokens": response["usage"]["completion_tokens"],
"content": reply_content,
}
else:
response = res.json()
error = response.get("error")
logger.error(f"[LINKAI] chat failed, status_code={res.status_code}, "
f"msg={error.get('message')}, type={error.get('type')}")
if res.status_code >= 500:
# server error, need retry
time.sleep(2)
logger.warn(f"[LINKAI] do retry, times={retry_count}")
return self.reply_text(session, app_code, retry_count + 1)
return {
"total_tokens": 0,
"completion_tokens": 0,
"content": "提问太快啦,请休息一下再问我吧"
}
except Exception as e:
logger.exception(e)
# retry
time.sleep(2)
logger.warn(f"[LINKAI] do retry, times={retry_count}")
return self.reply_text(session, app_code, retry_count + 1)
def _fetch_app_info(self, app_code: str):
headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
# do http request
base_url = conf().get("linkai_api_base", "https://api.link-ai.tech")
params = {"app_code": app_code}
res = requests.get(url=base_url + "/v1/app/info", params=params, headers=headers, timeout=(5, 10))
if res.status_code == 200:
return res.json()
else:
logger.warning(f"[LinkAI] find app info exception, res={res}")
def create_img(self, query, retry_count=0, api_key=None):
try:
logger.info("[LinkImage] image_query={}".format(query))
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {conf().get('linkai_api_key')}"
}
data = {
"prompt": query,
"n": 1,
"model": conf().get("text_to_image") or "dall-e-2",
"response_format": "url",
"img_proxy": conf().get("image_proxy")
}
url = conf().get("linkai_api_base", "https://api.link-ai.tech") + "/v1/images/generations"
res = requests.post(url, headers=headers, json=data, timeout=(5, 90))
t2 = time.time()
image_url = res.json()["data"][0]["url"]
logger.info("[OPEN_AI] image_url={}".format(image_url))
return True, image_url
except Exception as e:
logger.error(format(e))
return False, "画图出现问题,请休息一下再问我吧"
def _fetch_knowledge_search_suffix(self, response) -> str:
try:
if response.get("knowledge_base"):
search_hit = response.get("knowledge_base").get("search_hit")
first_similarity = response.get("knowledge_base").get("first_similarity")
logger.info(f"[LINKAI] knowledge base, search_hit={search_hit}, first_similarity={first_similarity}")
plugin_config = pconf("linkai")
if plugin_config and plugin_config.get("knowledge_base") and plugin_config.get("knowledge_base").get("search_miss_text_enabled"):
search_miss_similarity = plugin_config.get("knowledge_base").get("search_miss_similarity")
search_miss_text = plugin_config.get("knowledge_base").get("search_miss_suffix")
if not search_hit:
return search_miss_text
if search_miss_similarity and float(search_miss_similarity) > first_similarity:
return search_miss_text
except Exception as e:
logger.exception(e)
def _fetch_agent_suffix(self, response):
try:
plugin_list = []
logger.debug(f"[LinkAgent] res={response}")
if response.get("agent") and response.get("agent").get("chain") and response.get("agent").get("need_show_plugin"):
chain = response.get("agent").get("chain")
suffix = "\n\n- - - - - - - - - - - -"
i = 0
for turn in chain:
plugin_name = turn.get('plugin_name')
suffix += "\n"
need_show_thought = response.get("agent").get("need_show_thought")
if turn.get("thought") and plugin_name and need_show_thought:
suffix += f"{turn.get('thought')}\n"
if plugin_name:
plugin_list.append(turn.get('plugin_name'))
if turn.get('plugin_icon'):
suffix += f"{turn.get('plugin_icon')} "
suffix += f"{turn.get('plugin_name')}"
if turn.get('plugin_input'):
suffix += f":{turn.get('plugin_input')}"
if i < len(chain) - 1:
suffix += "\n"
i += 1
logger.info(f"[LinkAgent] use plugins: {plugin_list}")
return suffix
except Exception as e:
logger.exception(e)
def _process_url(self, text):
try:
url_pattern = re.compile(r'\[(.*?)\]\((http[s]?://.*?)\)')
def replace_markdown_url(match):
return f"{match.group(2)}"
return url_pattern.sub(replace_markdown_url, text)
except Exception as e:
logger.error(e)
def _send_image(self, channel, context, image_urls):
if not image_urls:
return
max_send_num = conf().get("max_media_send_count")
send_interval = conf().get("media_send_interval")
file_type = (".pdf", ".doc", ".docx", ".csv", ".xls", ".xlsx", ".txt", ".rtf", ".ppt", ".pptx")
try:
i = 0
for url in image_urls:
if max_send_num and i >= max_send_num:
continue
i += 1
if url.endswith(".mp4"):
reply_type = ReplyType.VIDEO_URL
elif url.endswith(file_type):
reply_type = ReplyType.FILE
url = _download_file(url)
if not url:
continue
else:
reply_type = ReplyType.IMAGE_URL
reply = Reply(reply_type, url)
channel.send(reply, context)
if send_interval:
time.sleep(send_interval)
except Exception as e:
logger.error(e)
def _download_file(url: str):
try:
file_path = "tmp"
if not os.path.exists(file_path):
os.makedirs(file_path)
file_name = url.split("/")[-1] # 获取文件名
file_path = os.path.join(file_path, file_name)
response = requests.get(url)
with open(file_path, "wb") as f:
f.write(response.content)
return file_path
except Exception as e:
logger.warn(e)
class LinkAISessionManager(SessionManager):
def session_msg_query(self, query, session_id):
session = self.build_session(session_id)
messages = session.messages + [{"role": "user", "content": query}]
return messages
def session_reply(self, reply, session_id, total_tokens=None, query=None):
session = self.build_session(session_id)
if query:
session.add_query(query)
session.add_reply(reply)
try:
max_tokens = conf().get("conversation_max_tokens", 8000)
tokens_cnt = session.discard_exceeding(max_tokens, total_tokens)
logger.debug(f"[LinkAI] chat history, before tokens={total_tokens}, now tokens={tokens_cnt}")
except Exception as e:
logger.warning("Exception when counting tokens precisely for session: {}".format(str(e)))
return session
class LinkAISession(ChatGPTSession):
def calc_tokens(self):
if not self.messages:
return 0
return len(str(self.messages))
def discard_exceeding(self, max_tokens, cur_tokens=None):
cur_tokens = self.calc_tokens()
if cur_tokens > max_tokens:
for i in range(0, len(self.messages)):
if i > 0 and self.messages[i].get("role") == "assistant" and self.messages[i - 1].get("role") == "user":
self.messages.pop(i)
self.messages.pop(i - 1)
return self.calc_tokens()
return cur_tokens
# Add call_with_tools method to LinkAIBot class
def _linkai_call_with_tools(self, messages, tools=None, stream=False, **kwargs):
try:
# Convert messages from Claude format to OpenAI format
# This is important because Agent uses Claude format internally
messages = self._convert_messages_to_openai_format(messages)
# Convert tools from Claude format to OpenAI format
if tools:
tools = self._convert_tools_to_openai_format(tools)
# Handle system prompt (OpenAI uses system message, Claude uses separate parameter)
system_prompt = kwargs.get('system')
if system_prompt:
# Add system message at the beginning if not already present
if not messages or messages[0].get('role') != 'system':
messages = [{"role": "system", "content": system_prompt}] + messages
else:
# Replace existing system message
messages[0] = {"role": "system", "content": system_prompt}
logger.debug(f"[LinkAI] messages: {len(messages)}, tools: {len(tools) if tools else 0}, stream: {stream}")
# Build request parameters (LinkAI uses OpenAI-compatible format)
raw_ct = conf().get("channel_type", "web")
if isinstance(raw_ct, list):
channel_type = raw_ct[0] if raw_ct else "web"
elif isinstance(raw_ct, str) and "," in raw_ct:
channel_type = raw_ct.split(",")[0].strip()
else:
channel_type = raw_ct
body = {
"messages": messages,
"model": kwargs.get("model", conf().get("model") or "gpt-3.5-turbo"),
"temperature": kwargs.get("temperature", conf().get("temperature", 0.9)),
"top_p": kwargs.get("top_p", conf().get("top_p", 1)),
"frequency_penalty": kwargs.get("frequency_penalty", conf().get("frequency_penalty", 0.0)),
"presence_penalty": kwargs.get("presence_penalty", conf().get("presence_penalty", 0.0)),
"stream": stream,
"channel_type": kwargs.get("channel_type", channel_type),
}
if tools:
body["tools"] = tools
body["tool_choice"] = kwargs.get("tool_choice", "auto")
# Prepare headers
headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
base_url = conf().get("linkai_api_base", "https://api.link-ai.tech")
if stream:
return self._handle_linkai_stream_response(base_url, headers, body)
else:
return self._handle_linkai_sync_response(base_url, headers, body)
except Exception as e:
logger.error(f"[LinkAI] call_with_tools error: {e}")
if stream:
def error_generator():
yield {
"error": True,
"message": str(e),
"status_code": 500
}
return error_generator()
else:
return {
"error": True,
"message": str(e),
"status_code": 500
}
def _handle_linkai_sync_response(self, base_url, headers, body):
try:
res = requests.post(
url=base_url + "/v1/chat/completions",
json=body,
headers=headers,
timeout=conf().get("request_timeout", 180)
)
if res.status_code == 200:
response = res.json()
logger.debug(f"[LinkAI] reply: model={response.get('model')}, "
f"tokens={response.get('usage', {}).get('total_tokens', 0)}")
# LinkAI response is already in OpenAI-compatible format
return response
else:
error_data = res.json()
error_msg = error_data.get("error", {}).get("message", "Unknown error")
raise Exception(f"LinkAI API error: {res.status_code} - {error_msg}")
except Exception as e:
logger.error(f"[LinkAI] sync response error: {e}")
raise
def _handle_linkai_stream_response(self, base_url, headers, body):
try:
res = requests.post(
url=base_url + "/v1/chat/completions",
json=body,
headers=headers,
timeout=conf().get("request_timeout", 180),
stream=True
)
if res.status_code != 200:
error_text = res.text
try:
error_data = json.loads(error_text)
error_msg = error_data.get("error", {}).get("message", error_text)
except Exception:
error_msg = error_text or "Unknown error"
yield {
"error": True,
"status_code": res.status_code,
"message": error_msg
}
return
# Process streaming response (OpenAI-compatible SSE format)
for line in res.iter_lines():
if line:
line = line.decode('utf-8')
if line.startswith('data: '):
line = line[6:] # Remove 'data: ' prefix
if line == '[DONE]':
break
try:
chunk = json.loads(line)
except json.JSONDecodeError:
continue
# Check for error responses within the stream
# Some providers (e.g., MiniMax via LinkAI) return errors as:
# {'type': 'error', 'error': {'type': '...', 'message': '...', 'http_code': '400'}}
if chunk.get("type") == "error" or (
isinstance(chunk.get("error"), dict) and "message" in chunk.get("error", {})
):
error_data = chunk.get("error", {})
error_msg = error_data.get("message", "Unknown error") if isinstance(error_data, dict) else str(error_data)
http_code = error_data.get("http_code", "") if isinstance(error_data, dict) else ""
status_code = int(http_code) if http_code and str(http_code).isdigit() else 400
logger.error(f"[LinkAI] stream error: {error_msg} (http_code={http_code})")
yield {
"error": True,
"message": error_msg,
"status_code": status_code
}
return
yield chunk
except Exception as e:
logger.error(f"[LinkAI] stream response error: {e}")
yield {
"error": True,
"message": str(e),
"status_code": 500
}
# Attach methods to LinkAIBot class
LinkAIBot.call_with_tools = _linkai_call_with_tools
LinkAIBot._handle_linkai_sync_response = _handle_linkai_sync_response
LinkAIBot._handle_linkai_stream_response = _handle_linkai_stream_response | --- +++ @@ -30,6 +30,7 @@ self.args = {}
def get_api_config(self):
+ """Get API configuration for OpenAI-compatible base class"""
return {
'api_key': conf().get("open_ai_api_key"), # LinkAI uses OpenAI-compatible key
'api_base': conf().get("open_ai_api_base", "https://api.link-ai.tech/v1"),
@@ -58,6 +59,13 @@ return reply
def _chat(self, query, context, retry_count=0) -> Reply:
+ """
+ 发起对话请求
+ :param query: 请求提示词
+ :param context: 对话上下文
+ :param retry_count: 当前递归重试次数
+ :return: 回复
+ """
if retry_count > 2:
# exit from retry 2 times
logger.warn("[LINKAI] failed after maximum number of retry times")
@@ -483,6 +491,19 @@
# Add call_with_tools method to LinkAIBot class
def _linkai_call_with_tools(self, messages, tools=None, stream=False, **kwargs):
+ """
+ Call LinkAI API with tool support for agent integration
+ LinkAI is fully compatible with OpenAI's tool calling format
+
+ Args:
+ messages: List of messages
+ tools: List of tool definitions (OpenAI format)
+ stream: Whether to use streaming
+ **kwargs: Additional parameters (max_tokens, temperature, etc.)
+
+ Returns:
+ Formatted response in OpenAI format or generator for streaming
+ """
try:
# Convert messages from Claude format to OpenAI format
# This is important because Agent uses Claude format internally
@@ -555,6 +576,7 @@ }
def _handle_linkai_sync_response(self, base_url, headers, body):
+ """Handle synchronous LinkAI API response"""
try:
res = requests.post(
url=base_url + "/v1/chat/completions",
@@ -580,6 +602,7 @@ raise
def _handle_linkai_stream_response(self, base_url, headers, body):
+ """Handle streaming LinkAI API response"""
try:
res = requests.post(
url=base_url + "/v1/chat/completions",
@@ -648,4 +671,4 @@ # Attach methods to LinkAIBot class
LinkAIBot.call_with_tools = _linkai_call_with_tools
LinkAIBot._handle_linkai_sync_response = _handle_linkai_sync_response
-LinkAIBot._handle_linkai_stream_response = _handle_linkai_stream_response+LinkAIBot._handle_linkai_stream_response = _handle_linkai_stream_response
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/linkai/link_ai_bot.py |
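The LinkAI row above injects an optional system prompt at the head of an OpenAI-style message list before calling the API; a small hedged sketch of that pattern (the helper name is illustrative, not from the repo) is:
from typing import Optional

def inject_system_prompt(messages: list, system_prompt: Optional[str]) -> list:
    # Prepend a system message if none exists, otherwise replace the existing one,
    # mirroring the handling shown in _linkai_call_with_tools above.
    if not system_prompt:
        return list(messages)
    if not messages or messages[0].get("role") != "system":
        return [{"role": "system", "content": system_prompt}] + list(messages)
    updated = list(messages)
    updated[0] = {"role": "system", "content": system_prompt}
    return updated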
Write proper docstrings for these functions | import os
import yaml
from typing import Dict, List, Optional
from agentmesh import AgentTeam, Agent, LLMModel
from agentmesh.models import ClaudeModel
from agentmesh.tools import ToolManager
from config import conf
import plugins
from plugins import Plugin, Event, EventContext, EventAction
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
@plugins.register(
name="agent",
desc="Use AgentMesh framework to process tasks with multi-agent teams",
version="0.1.0",
author="Saboteur7",
desire_priority=1,
)
class AgentPlugin(Plugin):
def __init__(self):
super().__init__()
self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
self.name = "agent"
self.description = "Use AgentMesh framework to process tasks with multi-agent teams"
self.config = self._load_config()
self.tool_manager = ToolManager()
self.tool_manager.load_tools(config_dict=self.config.get("tools"))
logger.debug("[agent] inited")
def _load_config(self) -> Dict:
config_path = os.path.join(self.path, "config.yaml")
if not os.path.exists(config_path):
logger.debug(f"Config file not found at {config_path}")
return {}
with open(config_path, 'r', encoding='utf-8') as f:
return yaml.safe_load(f)
def get_help_text(self, verbose=False, **kwargs):
help_text = "通过AgentMesh实现对终端、浏览器、文件系统、搜索引擎等工具的执行,并支持多智能体协作。"
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
if not verbose:
return help_text
teams = self.get_available_teams()
teams_str = ", ".join(teams) if teams else "未配置任何团队"
help_text += "\n\n使用说明:\n"
help_text += f"{trigger_prefix}agent [task] - 使用默认团队执行任务\n"
help_text += f"{trigger_prefix}agent teams - 列出可用的团队\n"
help_text += f"{trigger_prefix}agent use [team_name] [task] - 使用特定团队执行任务\n\n"
help_text += f"可用团队: \n{teams_str}\n\n"
help_text += f"示例:\n"
help_text += f"{trigger_prefix}agent 帮我查看当前文件夹路径\n"
help_text += f"{trigger_prefix}agent use software_team 帮我写一个产品预约体验的表单页面"
return help_text
def get_available_teams(self) -> List[str]:
teams_config = self.config.get("teams", {})
return list(teams_config.keys())
def create_team_from_config(self, team_name: str) -> Optional[AgentTeam]:
# Get teams configuration
teams_config = self.config.get("teams", {})
# Check if the specified team exists
if team_name not in teams_config:
logger.error(f"Team '{team_name}' not found in configuration.")
available_teams = list(teams_config.keys())
logger.info(f"Available teams: {', '.join(available_teams)}")
return None
# Get team configuration
team_config = teams_config[team_name]
# Get team's model
team_model_name = team_config.get("model", "gpt-4.1-mini")
team_model = self.create_llm_model(team_model_name)
# Get team's max_steps (default to 20 if not specified)
team_max_steps = team_config.get("max_steps", 20)
# Create team with the model
team = AgentTeam(
name=team_name,
description=team_config.get("description", ""),
rule=team_config.get("rule", ""),
model=team_model,
max_steps=team_max_steps
)
# Create and add agents to the team
agents_config = team_config.get("agents", [])
for agent_config in agents_config:
# Check if agent has a specific model
if agent_config.get("model"):
agent_model = self.create_llm_model(agent_config.get("model"))
else:
agent_model = team_model
# Get agent's max_steps
agent_max_steps = agent_config.get("max_steps")
agent = Agent(
name=agent_config.get("name", ""),
system_prompt=agent_config.get("system_prompt", ""),
model=agent_model, # Use agent's model if specified, otherwise will use team's model
description=agent_config.get("description", ""),
max_steps=agent_max_steps
)
# Add tools to the agent if specified
tool_names = agent_config.get("tools", [])
for tool_name in tool_names:
tool = self.tool_manager.create_tool(tool_name)
if tool:
agent.add_tool(tool)
else:
if tool_name == "browser":
logger.warning(
"Tool 'Browser' loaded failed, "
"please install the required dependency with: \n"
"'pip install browser-use>=0.1.40' or 'pip install agentmesh-sdk[full]'\n"
)
else:
logger.warning(f"Tool '{tool_name}' not found for agent '{agent.name}'\n")
# Add agent to team
team.add(agent)
return team
def on_handle_context(self, e_context: EventContext):
if e_context['context'].type != ContextType.TEXT:
return
content = e_context['context'].content
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
if not content.startswith(f"{trigger_prefix}agent "):
e_context.action = EventAction.CONTINUE
return
if not self.config:
reply = Reply()
reply.type = ReplyType.ERROR
reply.content = "未找到插件配置,请在 plugins/agent 目录下创建 config.yaml 配置文件,可根据 config-template.yml 模板文件复制"
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
# Extract the actual task
task = content[len(f"{trigger_prefix}agent "):].strip()
# If task is empty, return help message
if not task:
reply = Reply()
reply.type = ReplyType.TEXT
reply.content = self.get_help_text(verbose=True)
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
# Check if task is asking for available teams
if task.lower() in ["teams", "list teams", "show teams"]:
teams = self.get_available_teams()
reply = Reply()
reply.type = ReplyType.TEXT
if not teams:
reply.content = "未配置任何团队。请检查 config.yaml 文件。"
else:
reply.content = f"可用团队: {', '.join(teams)}"
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
# Check if task specifies a team
team_name = None
if task.startswith("use "):
parts = task[4:].split(" ", 1)
if len(parts) > 0:
team_name = parts[0]
if len(parts) > 1:
task = parts[1].strip()
else:
reply = Reply()
reply.type = ReplyType.TEXT
reply.content = f"已选择团队 '{team_name}'。请输入您想执行的任务。"
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
if not team_name:
team_name = self.config.get("team")
# If no team specified, use default or first available
if not team_name:
teams = self.get_available_teams()
if not teams:
reply = Reply()
reply.type = ReplyType.TEXT
reply.content = "未配置任何团队。请检查 config.yaml 文件。"
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
team_name = teams[0]
# Create team
team = self.create_team_from_config(team_name)
if not team:
reply = Reply()
reply.type = ReplyType.TEXT
reply.content = f"创建团队 '{team_name}' 失败。请检查配置。"
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
# Run the task
try:
logger.info(f"[agent] Running task '{task}' with team '{team_name}', team_model={team.model.model}")
result = team.run_async(task=task)
for agent_result in result:
res_text = f"🤖 {agent_result.get('agent_name')}\n\n{agent_result.get('final_answer')}"
_send_text(e_context, content=res_text)
reply = Reply()
reply.type = ReplyType.TEXT
reply.content = ""
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
except Exception as e:
logger.exception(f"Error running task with team '{team_name}'")
reply = Reply()
reply.type = ReplyType.ERROR
reply.content = f"执行任务时出错: {str(e)}"
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
def create_llm_model(self, model_name) -> LLMModel:
if conf().get("use_linkai"):
api_base = "https://api.link-ai.tech/v1"
api_key = conf().get("linkai_api_key")
elif model_name.startswith(("gpt", "text-davinci", "o1", "o3")):
api_base = conf().get("open_ai_api_base") or "https://api.openai.com/v1"
api_key = conf().get("open_ai_api_key")
elif model_name.startswith("claude"):
return ClaudeModel(model=model_name, api_key=conf().get("claude_api_key"))
elif model_name.startswith("moonshot"):
api_base = "https://api.moonshot.cn/v1"
api_key = conf().get("moonshot_api_key")
elif model_name.startswith("qwen"):
api_base = "https://dashscope.aliyuncs.com/compatible-mode/v1"
api_key = conf().get("dashscope_api_key")
else:
api_base = conf().get("open_ai_api_base") or "https://api.openai.com/v1"
api_key = conf().get("open_ai_api_key")
llm_model = LLMModel(model=model_name, api_key=api_key, api_base=api_base)
return llm_model
def _send_text(e_context: EventContext, content: str):
reply = Reply(ReplyType.TEXT, content)
channel = e_context["channel"]
channel.send(reply, e_context["context"]) | --- +++ @@ -22,6 +22,7 @@ desire_priority=1,
)
class AgentPlugin(Plugin):
+ """Plugin for integrating AgentMesh framework."""
def __init__(self):
super().__init__()
@@ -34,6 +35,7 @@ logger.debug("[agent] inited")
def _load_config(self) -> Dict:
+ """Load configuration from config.yaml file."""
config_path = os.path.join(self.path, "config.yaml")
if not os.path.exists(config_path):
logger.debug(f"Config file not found at {config_path}")
@@ -43,6 +45,7 @@ return yaml.safe_load(f)
def get_help_text(self, verbose=False, **kwargs):
+ """Return help message for the agent plugin."""
help_text = "通过AgentMesh实现对终端、浏览器、文件系统、搜索引擎等工具的执行,并支持多智能体协作。"
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
@@ -63,11 +66,13 @@ return help_text
def get_available_teams(self) -> List[str]:
+ """Get list of available teams from configuration."""
teams_config = self.config.get("teams", {})
return list(teams_config.keys())
def create_team_from_config(self, team_name: str) -> Optional[AgentTeam]:
+ """Create a team from configuration."""
# Get teams configuration
teams_config = self.config.get("teams", {})
@@ -139,6 +144,7 @@ return team
def on_handle_context(self, e_context: EventContext):
+ """Handle the message context."""
if e_context['context'].type != ContextType.TEXT:
return
content = e_context['context'].content
@@ -273,4 +279,4 @@ def _send_text(e_context: EventContext, content: str):
reply = Reply(ReplyType.TEXT, content)
channel = e_context["channel"]
- channel.send(reply, e_context["context"])+ channel.send(reply, e_context["context"])
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/plugins/agent/agent.py |
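The agent plugin row above reads a nested teams mapping from config.yaml; a hypothetical config, shown as the Python dict yaml.safe_load would return (keys inferred from create_team_from_config, values purely illustrative), might look like:
# Hypothetical parsed config.yaml content; names and values are examples only.
example_config = {
    "team": "default_team",            # default team used when none is specified
    "tools": {},                       # passed to ToolManager.load_tools
    "teams": {
        "default_team": {
            "description": "General-purpose assistant team",
            "rule": "",
            "model": "gpt-4.1-mini",
            "max_steps": 20,
            "agents": [
                {
                    "name": "researcher",
                    "description": "Searches and summarizes information",
                    "system_prompt": "You are a careful researcher.",
                    "tools": ["browser"],
                },
            ],
        },
    },
}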
Fill in missing docstrings in my code | # encoding:utf-8
import json
import time
import requests
from models.bot import Bot
from models.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from .moonshot_session import MoonshotSession
# Moonshot (Kimi) API Bot
class MoonshotBot(Bot):
def __init__(self):
super().__init__()
self.sessions = SessionManager(MoonshotSession, model=conf().get("model") or "moonshot-v1-128k")
model = conf().get("model") or "moonshot-v1-128k"
if model == "moonshot":
model = "moonshot-v1-32k"
self.args = {
"model": model,
"temperature": conf().get("temperature", 0.3),
"top_p": conf().get("top_p", 1.0),
}
@property
def api_key(self):
return conf().get("moonshot_api_key")
@property
def base_url(self):
url = conf().get("moonshot_base_url", "https://api.moonshot.cn/v1")
if url.endswith("/chat/completions"):
url = url.rsplit("/chat/completions", 1)[0]
return url.rstrip("/")
def reply(self, query, context=None):
# acquire reply content
if context.type == ContextType.TEXT:
logger.info("[MOONSHOT] query={}".format(query))
session_id = context["session_id"]
reply = None
clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
if query in clear_memory_commands:
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, "记忆已清除")
elif query == "#清除所有":
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
elif query == "#更新配置":
load_config()
reply = Reply(ReplyType.INFO, "配置已更新")
if reply:
return reply
session = self.sessions.session_query(query, session_id)
logger.debug("[MOONSHOT] session query={}".format(session.messages))
model = context.get("moonshot_model")
new_args = self.args.copy()
if model:
new_args["model"] = model
reply_content = self.reply_text(session, args=new_args)
logger.debug(
"[MOONSHOT] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
session.messages,
session_id,
reply_content["content"],
reply_content["completion_tokens"],
)
)
if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
reply = Reply(ReplyType.ERROR, reply_content["content"])
elif reply_content["completion_tokens"] > 0:
self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
reply = Reply(ReplyType.TEXT, reply_content["content"])
else:
reply = Reply(ReplyType.ERROR, reply_content["content"])
logger.debug("[MOONSHOT] reply {} used 0 tokens.".format(reply_content))
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply
def reply_text(self, session: MoonshotSession, args=None, retry_count: int = 0) -> dict:
try:
headers = {
"Content-Type": "application/json",
"Authorization": "Bearer " + self.api_key
}
body = args
body["messages"] = session.messages
res = requests.post(
f"{self.base_url}/chat/completions",
headers=headers,
json=body
)
if res.status_code == 200:
response = res.json()
return {
"total_tokens": response["usage"]["total_tokens"],
"completion_tokens": response["usage"]["completion_tokens"],
"content": response["choices"][0]["message"]["content"]
}
else:
response = res.json()
error = response.get("error")
logger.error(f"[MOONSHOT] chat failed, status_code={res.status_code}, "
f"msg={error.get('message')}, type={error.get('type')}")
result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
need_retry = False
if res.status_code >= 500:
logger.warn(f"[MOONSHOT] do retry, times={retry_count}")
need_retry = retry_count < 2
elif res.status_code == 401:
result["content"] = "授权失败,请检查API Key是否正确"
elif res.status_code == 429:
result["content"] = "请求过于频繁,请稍后再试"
need_retry = retry_count < 2
else:
need_retry = False
if need_retry:
time.sleep(3)
return self.reply_text(session, args, retry_count + 1)
else:
return result
except Exception as e:
logger.exception(e)
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if need_retry:
return self.reply_text(session, args, retry_count + 1)
else:
return result
# ==================== Agent mode support ====================
def call_with_tools(self, messages, tools=None, stream: bool = False, **kwargs):
try:
# Convert messages from Claude format to OpenAI format
converted_messages = self._convert_messages_to_openai_format(messages)
# Inject system prompt if provided
system_prompt = kwargs.pop("system", None)
if system_prompt:
if not converted_messages or converted_messages[0].get("role") != "system":
converted_messages.insert(0, {"role": "system", "content": system_prompt})
else:
converted_messages[0] = {"role": "system", "content": system_prompt}
# Convert tools from Claude format to OpenAI format
converted_tools = None
if tools:
converted_tools = self._convert_tools_to_openai_format(tools)
# Resolve model / temperature
model = kwargs.pop("model", None) or self.args["model"]
max_tokens = kwargs.pop("max_tokens", None)
# Discard temperature from kwargs; it is intentionally not forwarded to the API
kwargs.pop("temperature", None)
# Build request body (omit temperature, let the API use its own default)
request_body = {
"model": model,
"messages": converted_messages,
"stream": stream,
}
if max_tokens is not None:
request_body["max_tokens"] = max_tokens
# Add tools
if converted_tools:
request_body["tools"] = converted_tools
request_body["tool_choice"] = "auto"
# Explicitly disable thinking to avoid reasoning_content issues in multi-turn tool calls.
# kimi-k2.5 may enable thinking by default; without preserving reasoning_content
# in conversation history the API will reject subsequent requests.
request_body["thinking"] = {"type": "disabled"}
logger.debug(f"[MOONSHOT] API call: model={model}, "
f"tools={len(converted_tools) if converted_tools else 0}, stream={stream}")
if stream:
return self._handle_stream_response(request_body)
else:
return self._handle_sync_response(request_body)
except Exception as e:
logger.error(f"[MOONSHOT] call_with_tools error: {e}")
import traceback
logger.error(traceback.format_exc())
def error_generator():
yield {"error": True, "message": str(e), "status_code": 500}
return error_generator()
# -------------------- streaming --------------------
def _handle_stream_response(self, request_body: dict):
try:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}"
}
url = f"{self.base_url}/chat/completions"
response = requests.post(url, headers=headers, json=request_body, stream=True, timeout=120)
if response.status_code != 200:
error_msg = response.text
logger.error(f"[MOONSHOT] API error: status={response.status_code}, msg={error_msg}")
yield {"error": True, "message": error_msg, "status_code": response.status_code}
return
current_tool_calls = {}
finish_reason = None
for line in response.iter_lines():
if not line:
continue
line = line.decode("utf-8")
if not line.startswith("data: "):
continue
data_str = line[6:] # Remove "data: " prefix
if data_str.strip() == "[DONE]":
break
try:
chunk = json.loads(data_str)
except json.JSONDecodeError as e:
logger.warning(f"[MOONSHOT] JSON decode error: {e}, data: {data_str[:200]}")
continue
# Check for error in chunk
if chunk.get("error"):
error_data = chunk["error"]
error_msg = error_data.get("message", "Unknown error") if isinstance(error_data, dict) else str(error_data)
logger.error(f"[MOONSHOT] stream error: {error_msg}")
yield {"error": True, "message": error_msg, "status_code": 500}
return
if not chunk.get("choices"):
continue
choice = chunk["choices"][0]
delta = choice.get("delta", {})
# Skip reasoning_content (thinking) – don't log or forward
if delta.get("reasoning_content"):
continue
# Handle text content
if "content" in delta and delta["content"]:
yield {
"choices": [{
"index": 0,
"delta": {
"role": "assistant",
"content": delta["content"]
}
}]
}
# Handle tool_calls (streamed incrementally)
if "tool_calls" in delta:
for tool_call_chunk in delta["tool_calls"]:
index = tool_call_chunk.get("index", 0)
if index not in current_tool_calls:
current_tool_calls[index] = {
"id": tool_call_chunk.get("id", ""),
"type": "tool_use",
"name": tool_call_chunk.get("function", {}).get("name", ""),
"input": ""
}
# Accumulate arguments
if "function" in tool_call_chunk and "arguments" in tool_call_chunk["function"]:
current_tool_calls[index]["input"] += tool_call_chunk["function"]["arguments"]
# Yield OpenAI-format tool call delta
yield {
"choices": [{
"index": 0,
"delta": {
"tool_calls": [tool_call_chunk]
}
}]
}
# Capture finish_reason
if choice.get("finish_reason"):
finish_reason = choice["finish_reason"]
# Final chunk with finish_reason
yield {
"choices": [{
"index": 0,
"delta": {},
"finish_reason": finish_reason
}]
}
except requests.exceptions.Timeout:
logger.error("[MOONSHOT] Request timeout")
yield {"error": True, "message": "Request timeout", "status_code": 500}
except Exception as e:
logger.error(f"[MOONSHOT] stream response error: {e}")
import traceback
logger.error(traceback.format_exc())
yield {"error": True, "message": str(e), "status_code": 500}
# -------------------- sync --------------------
def _handle_sync_response(self, request_body: dict):
try:
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.api_key}"
}
request_body.pop("stream", None)
url = f"{self.base_url}/chat/completions"
response = requests.post(url, headers=headers, json=request_body, timeout=120)
if response.status_code != 200:
error_msg = response.text
logger.error(f"[MOONSHOT] API error: status={response.status_code}, msg={error_msg}")
yield {"error": True, "message": error_msg, "status_code": response.status_code}
return
result = response.json()
message = result["choices"][0]["message"]
finish_reason = result["choices"][0]["finish_reason"]
response_data = {"role": "assistant", "content": []}
# Add text content
if message.get("content"):
response_data["content"].append({
"type": "text",
"text": message["content"]
})
# Add tool calls
if message.get("tool_calls"):
for tool_call in message["tool_calls"]:
response_data["content"].append({
"type": "tool_use",
"id": tool_call["id"],
"name": tool_call["function"]["name"],
"input": json.loads(tool_call["function"]["arguments"])
})
# Map finish_reason
if finish_reason == "tool_calls":
response_data["stop_reason"] = "tool_use"
elif finish_reason == "stop":
response_data["stop_reason"] = "end_turn"
else:
response_data["stop_reason"] = finish_reason
yield response_data
except requests.exceptions.Timeout:
logger.error("[MOONSHOT] Request timeout")
yield {"error": True, "message": "Request timeout", "status_code": 500}
except Exception as e:
logger.error(f"[MOONSHOT] sync response error: {e}")
import traceback
logger.error(traceback.format_exc())
yield {"error": True, "message": str(e), "status_code": 500}
# -------------------- format conversion --------------------
def _convert_messages_to_openai_format(self, messages):
if not messages:
return []
converted = []
for msg in messages:
role = msg.get("role")
content = msg.get("content")
# Already a simple string – pass through
if isinstance(content, str):
converted.append(msg)
continue
if not isinstance(content, list):
converted.append(msg)
continue
if role == "user":
text_parts = []
tool_results = []
for block in content:
if not isinstance(block, dict):
continue
if block.get("type") == "text":
text_parts.append(block.get("text", ""))
elif block.get("type") == "tool_result":
tool_call_id = block.get("tool_use_id") or ""
result_content = block.get("content", "")
if not isinstance(result_content, str):
result_content = json.dumps(result_content, ensure_ascii=False)
tool_results.append({
"role": "tool",
"tool_call_id": tool_call_id,
"content": result_content
})
# Tool results first (must come right after assistant with tool_calls)
for tr in tool_results:
converted.append(tr)
if text_parts:
converted.append({"role": "user", "content": "\n".join(text_parts)})
elif role == "assistant":
openai_msg = {"role": "assistant"}
text_parts = []
tool_calls = []
for block in content:
if not isinstance(block, dict):
continue
if block.get("type") == "text":
text_parts.append(block.get("text", ""))
elif block.get("type") == "tool_use":
tool_calls.append({
"id": block.get("id"),
"type": "function",
"function": {
"name": block.get("name"),
"arguments": json.dumps(block.get("input", {}))
}
})
if text_parts:
openai_msg["content"] = "\n".join(text_parts)
elif not tool_calls:
openai_msg["content"] = ""
if tool_calls:
openai_msg["tool_calls"] = tool_calls
if not text_parts:
openai_msg["content"] = None
converted.append(openai_msg)
else:
converted.append(msg)
return converted
def _convert_tools_to_openai_format(self, tools):
if not tools:
return None
converted = []
for tool in tools:
# Already in OpenAI format
if "type" in tool and tool["type"] == "function":
converted.append(tool)
else:
converted.append({
"type": "function",
"function": {
"name": tool.get("name"),
"description": tool.get("description"),
"parameters": tool.get("input_schema", {})
}
})
return converted | --- +++ @@ -88,6 +88,13 @@ return reply
def reply_text(self, session: MoonshotSession, args=None, retry_count: int = 0) -> dict:
+ """
+ Call Moonshot chat completion API to get the answer
+ :param session: a conversation session
+ :param args: model args
+ :param retry_count: retry count
+ :return: {}
+ """
try:
headers = {
"Content-Type": "application/json",
@@ -143,6 +150,24 @@ # ==================== Agent mode support ====================
def call_with_tools(self, messages, tools=None, stream: bool = False, **kwargs):
+ """
+ Call Moonshot API with tool support for agent integration.
+
+ This method handles:
+ 1. Format conversion (Claude format -> OpenAI format)
+ 2. System prompt injection
+ 3. Streaming SSE response with tool_calls
+ 4. Thinking (reasoning) is disabled by default to avoid tool_choice conflicts
+
+ Args:
+ messages: List of messages (may be in Claude format from agent)
+ tools: List of tool definitions (may be in Claude format from agent)
+ stream: Whether to use streaming
+ **kwargs: Additional parameters (max_tokens, temperature, system, model, etc.)
+
+ Returns:
+ Generator yielding OpenAI-format chunks (for streaming)
+ """
try:
# Convert messages from Claude format to OpenAI format
converted_messages = self._convert_messages_to_openai_format(messages)
@@ -205,6 +230,7 @@ # -------------------- streaming --------------------
def _handle_stream_response(self, request_body: dict):
+ """Handle streaming SSE response from Moonshot API and yield OpenAI-format chunks."""
try:
headers = {
"Content-Type": "application/json",
@@ -322,6 +348,7 @@ # -------------------- sync --------------------
def _handle_sync_response(self, request_body: dict):
+ """Handle synchronous API response and yield a single result dict."""
try:
headers = {
"Content-Type": "application/json",
@@ -383,6 +410,12 @@ # -------------------- format conversion --------------------
def _convert_messages_to_openai_format(self, messages):
+ """
+ Convert messages from Claude format to OpenAI format.
+
+ Claude format uses content blocks: tool_use / tool_result / text
+ OpenAI format uses tool_calls in assistant, role=tool for results
+ """
if not messages:
return []
@@ -465,6 +498,12 @@ return converted
def _convert_tools_to_openai_format(self, tools):
+ """
+ Convert tools from Claude format to OpenAI format.
+
+ Claude: {name, description, input_schema}
+ OpenAI: {type: "function", function: {name, description, parameters}}
+ """
if not tools:
return None
@@ -483,4 +522,4 @@ }
})
- return converted+ return converted
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/moonshot/moonshot_bot.py |
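The Moonshot row above parses an OpenAI-style SSE stream line by line; a compact sketch of that parsing loop under the same assumptions (strip the "data: " prefix, stop at "[DONE]", skip non-JSON lines) is shown here with an illustrative helper name:
import json
import requests

def iter_sse_chunks(url: str, headers: dict, body: dict):
    # Yield decoded JSON chunks from an OpenAI-compatible streaming endpoint.
    with requests.post(url, headers=headers, json=body, stream=True, timeout=120) as res:
        for raw in res.iter_lines():
            if not raw:
                continue
            line = raw.decode("utf-8")
            if not line.startswith("data: "):
                continue
            data = line[len("data: "):]
            if data.strip() == "[DONE]":
                break
            try:
                yield json.loads(data)
            except json.JSONDecodeError:
                continue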
Document all endpoints with docstrings | # -*- coding: utf-8 -*-
import json
import os
import re
import time
from bridge.reply import Reply, ReplyType
from common.log import logger
from voice.voice import Voice
from voice.ali.ali_api import AliyunTokenGenerator, speech_to_text_aliyun, text_to_speech_aliyun
from config import conf
try:
from voice.audio_convert import get_pcm_from_wav
except ImportError as e:
logger.debug("import voice.audio_convert failed: {}".format(e))
class AliVoice(Voice):
def __init__(self):
try:
curdir = os.path.dirname(__file__)
config_path = os.path.join(curdir, "config.json")
with open(config_path, "r") as fr:
config = json.load(fr)
self.token = None
self.token_expire_time = 0
# 默认复用阿里云千问的 access_key 和 access_secret
self.api_url_voice_to_text = config.get("api_url_voice_to_text")
self.api_url_text_to_voice = config.get("api_url_text_to_voice")
self.app_key = config.get("app_key")
self.access_key_id = conf().get("qwen_access_key_id") or config.get("access_key_id")
self.access_key_secret = conf().get("qwen_access_key_secret") or config.get("access_key_secret")
except Exception as e:
logger.warn("AliVoice init failed: %s, ignore " % e)
def textToVoice(self, text):
# 清除文本中的非中文、非英文和非基本字符
text = re.sub(r'[^\u4e00-\u9fa5\u3040-\u30FF\uAC00-\uD7AFa-zA-Z0-9'
r'äöüÄÖÜáéíóúÁÉÍÓÚàèìòùÀÈÌÒÙâêîôûÂÊÎÔÛçÇñÑ,。!?,.]', '', text)
# 提取有效的token
token_id = self.get_valid_token()
fileName = text_to_speech_aliyun(self.api_url_text_to_voice, text, self.app_key, token_id)
if fileName:
logger.info("[Ali] textToVoice text={} voice file name={}".format(text, fileName))
reply = Reply(ReplyType.VOICE, fileName)
else:
reply = Reply(ReplyType.ERROR, "抱歉,语音合成失败")
return reply
def voiceToText(self, voice_file):
# 提取有效的token
token_id = self.get_valid_token()
logger.debug("[Ali] voice file name={}".format(voice_file))
pcm = get_pcm_from_wav(voice_file)
text = speech_to_text_aliyun(self.api_url_voice_to_text, pcm, self.app_key, token_id)
if text:
logger.info("[Ali] VoicetoText = {}".format(text))
reply = Reply(ReplyType.TEXT, text)
else:
reply = Reply(ReplyType.ERROR, "抱歉,语音识别失败")
return reply
def get_valid_token(self):
current_time = time.time()
if self.token is None or current_time >= self.token_expire_time:
get_token = AliyunTokenGenerator(self.access_key_id, self.access_key_secret)
token_str = get_token.get_token()
token_data = json.loads(token_str)
self.token = token_data["Token"]["Id"]
# 将过期时间减少一小段时间(例如5分钟),以避免在边界条件下的过期
self.token_expire_time = token_data["Token"]["ExpireTime"] - 300
logger.debug(f"新获取的阿里云token:{self.token}")
else:
logger.debug("使用缓存的token")
return self.token | --- +++ @@ -1,4 +1,13 @@ # -*- coding: utf-8 -*-
+"""
+Author: chazzjimel
+Email: chazzjimel@gmail.com
+wechat:cheung-z-x
+
+Description:
+ali voice service
+
+"""
import json
import os
import re
@@ -18,6 +27,9 @@
class AliVoice(Voice):
def __init__(self):
+ """
+ 初始化AliVoice类,从配置文件加载必要的配置。
+ """
try:
curdir = os.path.dirname(__file__)
config_path = os.path.join(curdir, "config.json")
@@ -35,6 +47,12 @@ logger.warn("AliVoice init failed: %s, ignore " % e)
def textToVoice(self, text):
+ """
+ 将文本转换为语音文件。
+
+ :param text: 要转换的文本。
+ :return: 返回一个Reply对象,其中包含转换得到的语音文件或错误信息。
+ """
# 清除文本中的非中文、非英文和非基本字符
text = re.sub(r'[^\u4e00-\u9fa5\u3040-\u30FF\uAC00-\uD7AFa-zA-Z0-9'
r'äöüÄÖÜáéíóúÁÉÍÓÚàèìòùÀÈÌÒÙâêîôûÂÊÎÔÛçÇñÑ,。!?,.]', '', text)
@@ -49,6 +67,12 @@ return reply
def voiceToText(self, voice_file):
+ """
+ 将语音文件转换为文本。
+
+ :param voice_file: 要转换的语音文件。
+ :return: 返回一个Reply对象,其中包含转换得到的文本或错误信息。
+ """
# 提取有效的token
token_id = self.get_valid_token()
logger.debug("[Ali] voice file name={}".format(voice_file))
@@ -62,6 +86,11 @@ return reply
def get_valid_token(self):
+ """
+ 获取有效的阿里云token。
+
+ :return: 返回有效的token字符串。
+ """
current_time = time.time()
if self.token is None or current_time >= self.token_expire_time:
get_token = AliyunTokenGenerator(self.access_key_id, self.access_key_secret)
@@ -73,4 +102,4 @@ logger.debug(f"新获取的阿里云token:{self.token}")
else:
logger.debug("使用缓存的token")
- return self.token+ return self.token
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/voice/ali/ali_voice.py |
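The AliVoice row above caches the access token and refreshes it five minutes before expiry; a generic sketch of that expiry-margin caching (class and parameter names are illustrative, not from the repo) could be:
import time

class TokenCache:
    # Refresh only when the cached token is missing or within the safety
    # margin of expiring, mirroring get_valid_token above.
    def __init__(self, fetch, margin_seconds: int = 300):
        self._fetch = fetch            # callable returning (token, expire_timestamp)
        self._margin = margin_seconds
        self._token = None
        self._expire_at = 0

    def get(self) -> str:
        if self._token is None or time.time() >= self._expire_at:
            token, expire_ts = self._fetch()
            self._token = token
            self._expire_at = expire_ts - self._margin
        return self._token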
Create structured documentation for my script | #!/usr/bin/env python3
import sys
from pathlib import Path
SKILL_TEMPLATE = """---
name: {skill_name}
description: [TODO: Complete and informative explanation of what the skill does and when to use it. Include WHEN to use this skill - specific scenarios, file types, or tasks that trigger it.]
---
# {skill_title}
## Overview
[TODO: 1-2 sentences explaining what this skill enables]
## Structuring This Skill
[TODO: Choose the structure that best fits this skill's purpose. Common patterns:
**1. Workflow-Based** (best for sequential processes)
- Works well when there are clear step-by-step procedures
- Example: DOCX skill with "Workflow Decision Tree" → "Reading" → "Creating" → "Editing"
- Structure: ## Overview → ## Workflow Decision Tree → ## Step 1 → ## Step 2...
**2. Task-Based** (best for tool collections)
- Works well when the skill offers different operations/capabilities
- Example: PDF skill with "Quick Start" → "Merge PDFs" → "Split PDFs" → "Extract Text"
- Structure: ## Overview → ## Quick Start → ## Task Category 1 → ## Task Category 2...
**3. Reference/Guidelines** (best for standards or specifications)
- Works well for brand guidelines, coding standards, or requirements
- Example: Brand styling with "Brand Guidelines" → "Colors" → "Typography" → "Features"
- Structure: ## Overview → ## Guidelines → ## Specifications → ## Usage...
**4. Capabilities-Based** (best for integrated systems)
- Works well when the skill provides multiple interrelated features
- Example: Product Management with "Core Capabilities" → numbered capability list
- Structure: ## Overview → ## Core Capabilities → ### 1. Feature → ### 2. Feature...
Patterns can be mixed and matched as needed. Most skills combine patterns (e.g., start with task-based, add workflow for complex operations).
Delete this entire "Structuring This Skill" section when done - it's just guidance.]
## [TODO: Replace with the first main section based on chosen structure]
[TODO: Add content here. See examples in existing skills:
- Code samples for technical skills
- Decision trees for complex workflows
- Concrete examples with realistic user requests
- References to scripts/templates/references as needed]
## Resources
This skill includes example resource directories that demonstrate how to organize different types of bundled resources:
### scripts/
Executable code (Python/Bash/etc.) that can be run directly to perform specific operations.
**Examples from other skills:**
- PDF skill: `fill_fillable_fields.py`, `extract_form_field_info.py` - utilities for PDF manipulation
- DOCX skill: `document.py`, `utilities.py` - Python modules for document processing
**Appropriate for:** Python scripts, shell scripts, or any executable code that performs automation, data processing, or specific operations.
**Note:** Scripts may be executed without loading into context, but can still be read by Claude for patching or environment adjustments.
### references/
Documentation and reference material intended to be loaded into context to inform Claude's process and thinking.
**Examples from other skills:**
- Product management: `communication.md`, `context_building.md` - detailed workflow guides
- BigQuery: API reference documentation and query examples
- Finance: Schema documentation, company policies
**Appropriate for:** In-depth documentation, API references, database schemas, comprehensive guides, or any detailed information that Claude should reference while working.
### assets/
Files not intended to be loaded into context, but rather used within the output Claude produces.
**Examples from other skills:**
- Brand styling: PowerPoint template files (.pptx), logo files
- Frontend builder: HTML/React boilerplate project directories
- Typography: Font files (.ttf, .woff2)
**Appropriate for:** Templates, boilerplate code, document templates, images, icons, fonts, or any files meant to be copied or used in the final output.
---
**Any unneeded directories can be deleted.** Not every skill requires all three types of resources.
"""
EXAMPLE_SCRIPT = '''#!/usr/bin/env python3
"""
Example helper script for {skill_name}
This is a placeholder script that can be executed directly.
Replace with actual implementation or delete if not needed.
Example real scripts from other skills:
- pdf/scripts/fill_fillable_fields.py - Fills PDF form fields
- pdf/scripts/convert_pdf_to_images.py - Converts PDF pages to images
"""
def main():
print("This is an example script for {skill_name}")
# TODO: Add actual script logic here
# This could be data processing, file conversion, API calls, etc.
if __name__ == "__main__":
main()
'''
EXAMPLE_REFERENCE = """# Reference Documentation for {skill_title}
This is a placeholder for detailed reference documentation.
Replace with actual reference content or delete if not needed.
Example real reference docs from other skills:
- product-management/references/communication.md - Comprehensive guide for status updates
- product-management/references/context_building.md - Deep-dive on gathering context
- bigquery/references/ - API references and query examples
## When Reference Docs Are Useful
Reference docs are ideal for:
- Comprehensive API documentation
- Detailed workflow guides
- Complex multi-step processes
- Information too lengthy for main SKILL.md
- Content that's only needed for specific use cases
## Structure Suggestions
### API Reference Example
- Overview
- Authentication
- Endpoints with examples
- Error codes
- Rate limits
### Workflow Guide Example
- Prerequisites
- Step-by-step instructions
- Common patterns
- Troubleshooting
- Best practices
"""
EXAMPLE_ASSET = """# Example Asset File
This placeholder represents where asset files would be stored.
Replace with actual asset files (templates, images, fonts, etc.) or delete if not needed.
Asset files are NOT intended to be loaded into context, but rather used within
the output Claude produces.
Example asset files from other skills:
- Brand guidelines: logo.png, slides_template.pptx
- Frontend builder: hello-world/ directory with HTML/React boilerplate
- Typography: custom-font.ttf, font-family.woff2
- Data: sample_data.csv, test_dataset.json
## Common Asset Types
- Templates: .pptx, .docx, boilerplate directories
- Images: .png, .jpg, .svg, .gif
- Fonts: .ttf, .otf, .woff, .woff2
- Boilerplate code: Project directories, starter files
- Icons: .ico, .svg
- Data files: .csv, .json, .xml, .yaml
Note: This is a text placeholder. Actual assets can be any file type.
"""
def title_case_skill_name(skill_name):
return ' '.join(word.capitalize() for word in skill_name.split('-'))
def init_skill(skill_name, path):
# Determine skill directory path
skill_dir = Path(path).resolve() / skill_name
# Check if directory already exists
if skill_dir.exists():
print(f"❌ Error: Skill directory already exists: {skill_dir}")
return None
# Create skill directory
try:
skill_dir.mkdir(parents=True, exist_ok=False)
print(f"✅ Created skill directory: {skill_dir}")
except Exception as e:
print(f"❌ Error creating directory: {e}")
return None
# Create SKILL.md from template
skill_title = title_case_skill_name(skill_name)
skill_content = SKILL_TEMPLATE.format(
skill_name=skill_name,
skill_title=skill_title
)
skill_md_path = skill_dir / 'SKILL.md'
try:
skill_md_path.write_text(skill_content)
print("✅ Created SKILL.md")
except Exception as e:
print(f"❌ Error creating SKILL.md: {e}")
return None
# Create resource directories with example files
try:
# Create scripts/ directory with example script
scripts_dir = skill_dir / 'scripts'
scripts_dir.mkdir(exist_ok=True)
example_script = scripts_dir / 'example.py'
example_script.write_text(EXAMPLE_SCRIPT.format(skill_name=skill_name))
example_script.chmod(0o755)
print("✅ Created scripts/example.py")
# Create references/ directory with example reference doc
references_dir = skill_dir / 'references'
references_dir.mkdir(exist_ok=True)
example_reference = references_dir / 'api_reference.md'
example_reference.write_text(EXAMPLE_REFERENCE.format(skill_title=skill_title))
print("✅ Created references/api_reference.md")
# Create assets/ directory with example asset placeholder
assets_dir = skill_dir / 'assets'
assets_dir.mkdir(exist_ok=True)
example_asset = assets_dir / 'example_asset.txt'
example_asset.write_text(EXAMPLE_ASSET)
print("✅ Created assets/example_asset.txt")
except Exception as e:
print(f"❌ Error creating resource directories: {e}")
return None
# Print next steps
print(f"\n✅ Skill '{skill_name}' initialized successfully at {skill_dir}")
print("\nNext steps:")
print("1. Edit SKILL.md to complete the TODO items and update the description")
print("2. Customize or delete the example files in scripts/, references/, and assets/")
print("3. Run the validator when ready to check the skill structure")
return skill_dir
def main():
if len(sys.argv) < 4 or sys.argv[2] != '--path':
print("Usage: init_skill.py <skill-name> --path <path>")
print("\nSkill name requirements:")
print(" - Hyphen-case identifier (e.g., 'data-analyzer')")
print(" - Lowercase letters, digits, and hyphens only")
print(" - Max 40 characters")
print(" - Must match directory name exactly")
print("\nExamples:")
print(" init_skill.py my-new-skill --path workspace/skills")
print(" init_skill.py my-api-helper --path /path/to/skills")
print(" init_skill.py custom-skill --path /custom/location")
sys.exit(1)
skill_name = sys.argv[1]
path = sys.argv[3]
print(f"🚀 Initializing skill: {skill_name}")
print(f" Location: {path}")
print()
result = init_skill(skill_name, path)
if result:
sys.exit(0)
else:
sys.exit(1)
if __name__ == "__main__":
main() | --- +++ @@ -1,4 +1,15 @@ #!/usr/bin/env python3
+"""
+Skill Initializer - Creates a new skill from template
+
+Usage:
+ init_skill.py <skill-name> --path <path>
+
+Examples:
+ init_skill.py my-new-skill --path skills/public
+ init_skill.py my-api-helper --path skills/private
+ init_skill.py custom-skill --path /custom/location
+"""
import sys
from pathlib import Path
@@ -176,10 +187,21 @@
def title_case_skill_name(skill_name):
+ """Convert hyphenated skill name to Title Case for display."""
return ' '.join(word.capitalize() for word in skill_name.split('-'))
def init_skill(skill_name, path):
+ """
+ Initialize a new skill directory with template SKILL.md.
+
+ Args:
+ skill_name: Name of the skill
+ path: Path where the skill directory should be created
+
+ Returns:
+ Path to created skill directory, or None if error
+ """
# Determine skill directory path
skill_dir = Path(path).resolve() / skill_name
@@ -278,4 +300,4 @@
if __name__ == "__main__":
- main()
+ main()
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/skills/skill-creator/scripts/init_skill.py |
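The usage help printed by main() above states the naming rule (hyphen-case, lowercase letters, digits and hyphens only, at most 40 characters) but init_skill.py does not enforce it; a minimal check could look like this sketch. The helper name and regex are illustrative additions, not part of the script.

import re

def is_valid_skill_name(skill_name: str) -> bool:
    # lowercase letters/digits grouped by single hyphens, no leading/trailing hyphen, max 40 chars
    return len(skill_name) <= 40 and re.fullmatch(r"[a-z0-9]+(-[a-z0-9]+)*", skill_name) is not None

# is_valid_skill_name("data-analyzer")  -> True
# is_valid_skill_name("Data_Analyzer")  -> False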
Write docstrings for algorithm functions | # encoding:utf-8
import time
import json
from models.bot import Bot
from models.zhipuai.zhipu_ai_session import ZhipuAISession
from models.zhipuai.zhipu_ai_image import ZhipuAIImage
from models.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf, load_config
from zai import ZhipuAiClient
# ZhipuAI对话模型API
class ZHIPUAIBot(Bot, ZhipuAIImage):
def __init__(self):
super().__init__()
self.sessions = SessionManager(ZhipuAISession, model=conf().get("model") or "ZHIPU_AI")
self.args = {
"model": conf().get("model") or "glm-4", # 对话模型的名称
"temperature": conf().get("temperature", 0.9), # 值在(0,1)之间(智谱AI 的温度不能取 0 或者 1)
"top_p": conf().get("top_p", 0.7), # 值在(0,1)之间(智谱AI 的 top_p 不能取 0 或者 1)
}
# 初始化客户端,支持自定义 API base URL(例如智谱国际版 z.ai)
api_key = conf().get("zhipu_ai_api_key")
api_base = conf().get("zhipu_ai_api_base")
if api_base:
self.client = ZhipuAiClient(api_key=api_key, base_url=api_base)
else:
self.client = ZhipuAiClient(api_key=api_key)
def reply(self, query, context=None):
# acquire reply content
if context.type == ContextType.TEXT:
logger.info("[ZHIPU_AI] query={}".format(query))
session_id = context["session_id"]
reply = None
clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
if query in clear_memory_commands:
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, "记忆已清除")
elif query == "#清除所有":
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
elif query == "#更新配置":
load_config()
reply = Reply(ReplyType.INFO, "配置已更新")
if reply:
return reply
session = self.sessions.session_query(query, session_id)
logger.debug("[ZHIPU_AI] session query={}".format(session.messages))
model = context.get("gpt_model")
new_args = None
if model:
new_args = self.args.copy()
new_args["model"] = model
reply_content = self.reply_text(session, args=new_args)
logger.debug(
"[ZHIPU_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
session.messages,
session_id,
reply_content["content"],
reply_content["completion_tokens"],
)
)
if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
reply = Reply(ReplyType.ERROR, reply_content["content"])
elif reply_content["completion_tokens"] > 0:
self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
reply = Reply(ReplyType.TEXT, reply_content["content"])
else:
reply = Reply(ReplyType.ERROR, reply_content["content"])
logger.debug("[ZHIPU_AI] reply {} used 0 tokens.".format(reply_content))
return reply
elif context.type == ContextType.IMAGE_CREATE:
ok, retstring = self.create_img(query, 0)
reply = None
if ok:
reply = Reply(ReplyType.IMAGE_URL, retstring)
else:
reply = Reply(ReplyType.ERROR, retstring)
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply
def reply_text(self, session: ZhipuAISession, args=None, retry_count=0) -> dict:
try:
if args is None:
args = self.args
response = self.client.chat.completions.create(messages=session.messages, **args)
# logger.debug("[ZHIPU_AI] response={}".format(response))
# logger.info("[ZHIPU_AI] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
return {
"total_tokens": response.usage.total_tokens,
"completion_tokens": response.usage.completion_tokens,
"content": response.choices[0].message.content,
}
except Exception as e:
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
error_str = str(e).lower()
# Check error type by error message content
if "rate" in error_str and "limit" in error_str:
logger.warn("[ZHIPU_AI] RateLimitError: {}".format(e))
result["content"] = "提问太快啦,请休息一下再问我吧"
if need_retry:
time.sleep(20)
elif "timeout" in error_str or "timed out" in error_str:
logger.warn("[ZHIPU_AI] Timeout: {}".format(e))
result["content"] = "我没有收到你的消息"
if need_retry:
time.sleep(5)
elif "api" in error_str and ("error" in error_str or "gateway" in error_str):
logger.warn("[ZHIPU_AI] APIError: {}".format(e))
result["content"] = "请再问我一次"
if need_retry:
time.sleep(10)
elif "connection" in error_str or "network" in error_str:
logger.warn("[ZHIPU_AI] ConnectionError: {}".format(e))
result["content"] = "我连接不到你的网络"
if need_retry:
time.sleep(5)
else:
logger.exception("[ZHIPU_AI] Exception: {}".format(e), e)
need_retry = False
self.sessions.clear_session(session.session_id)
if need_retry:
logger.warn("[ZHIPU_AI] 第{}次重试".format(retry_count + 1))
return self.reply_text(session, args, retry_count + 1)
else:
return result
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
try:
# Convert messages from Claude format to ZhipuAI format
messages = self._convert_messages_to_zhipu_format(messages)
# Convert tools from Claude format to ZhipuAI format
if tools:
tools = self._convert_tools_to_zhipu_format(tools)
# Handle system prompt
system_prompt = kwargs.get('system')
if system_prompt:
# Add system message at the beginning if not already present
if not messages or messages[0].get('role') != 'system':
messages = [{"role": "system", "content": system_prompt}] + messages
else:
# Replace existing system message
messages[0] = {"role": "system", "content": system_prompt}
# Build request parameters
request_params = {
"model": kwargs.get("model", self.args.get("model", "glm-4")),
"messages": messages,
"temperature": kwargs.get("temperature", self.args.get("temperature", 0.9)),
"top_p": kwargs.get("top_p", self.args.get("top_p", 0.7)),
"stream": stream
}
# Add max_tokens if specified
if kwargs.get("max_tokens"):
request_params["max_tokens"] = kwargs["max_tokens"]
# Add tools if provided
if tools:
request_params["tools"] = tools
# GLM-4.7 with zai-sdk supports tool_stream for streaming tool calls
if stream:
request_params["tool_stream"] = kwargs.get("tool_stream", True)
# Add thinking parameter for deep thinking mode (GLM-4.7)
thinking = kwargs.get("thinking")
if thinking:
request_params["thinking"] = thinking
elif "glm-4.7" in request_params["model"]:
# Thinking is disabled by default for GLM-4.7 (pass kwargs["thinking"] to enable it)
request_params["thinking"] = {"type": "disabled"}
# Make API call with ZhipuAI SDK
if stream:
return self._handle_stream_response(request_params)
else:
return self._handle_sync_response(request_params)
except Exception as e:
error_msg = str(e)
logger.error(f"[ZHIPU_AI] call_with_tools error: {error_msg}")
if stream:
def error_generator():
yield {
"error": True,
"message": error_msg,
"status_code": 500
}
return error_generator()
else:
return {
"error": True,
"message": error_msg,
"status_code": 500
}
def _handle_sync_response(self, request_params):
try:
response = self.client.chat.completions.create(**request_params)
# Convert ZhipuAI response to OpenAI-compatible format
return {
"id": response.id,
"object": "chat.completion",
"created": response.created,
"model": response.model,
"choices": [{
"index": 0,
"message": {
"role": response.choices[0].message.role,
"content": response.choices[0].message.content,
"tool_calls": self._convert_tool_calls_to_openai_format(
getattr(response.choices[0].message, 'tool_calls', None)
)
},
"finish_reason": response.choices[0].finish_reason
}],
"usage": {
"prompt_tokens": response.usage.prompt_tokens,
"completion_tokens": response.usage.completion_tokens,
"total_tokens": response.usage.total_tokens
}
}
except Exception as e:
logger.error(f"[ZHIPU_AI] sync response error: {e}")
return {
"error": True,
"message": str(e),
"status_code": 500
}
def _handle_stream_response(self, request_params):
try:
stream = self.client.chat.completions.create(**request_params)
# Stream chunks to caller, converting to OpenAI format
for chunk in stream:
if not chunk.choices:
continue
delta = chunk.choices[0].delta
# Convert to OpenAI-compatible format
openai_chunk = {
"id": chunk.id,
"object": "chat.completion.chunk",
"created": chunk.created,
"model": chunk.model,
"choices": [{
"index": 0,
"delta": {},
"finish_reason": chunk.choices[0].finish_reason
}]
}
# Add role if present
if hasattr(delta, 'role') and delta.role:
openai_chunk["choices"][0]["delta"]["role"] = delta.role
# Add content if present
if hasattr(delta, 'content') and delta.content:
openai_chunk["choices"][0]["delta"]["content"] = delta.content
# Add reasoning_content as separate field if present (GLM-5/GLM-4.7 thinking)
if hasattr(delta, 'reasoning_content') and delta.reasoning_content:
openai_chunk["choices"][0]["delta"]["reasoning_content"] = delta.reasoning_content
# Add tool_calls if present
if hasattr(delta, 'tool_calls') and delta.tool_calls:
# For streaming, tool_calls need special handling
openai_tool_calls = []
for tc in delta.tool_calls:
tool_call_dict = {
"index": getattr(tc, 'index', 0),
"id": getattr(tc, 'id', None),
"type": "function",
"function": {}
}
# Add function name if present
if hasattr(tc, 'function') and hasattr(tc.function, 'name') and tc.function.name:
tool_call_dict["function"]["name"] = tc.function.name
# Add function arguments if present
if hasattr(tc, 'function') and hasattr(tc.function, 'arguments') and tc.function.arguments:
tool_call_dict["function"]["arguments"] = tc.function.arguments
openai_tool_calls.append(tool_call_dict)
openai_chunk["choices"][0]["delta"]["tool_calls"] = openai_tool_calls
yield openai_chunk
except Exception as e:
logger.error(f"[ZHIPU_AI] stream response error: {e}")
yield {
"error": True,
"message": str(e),
"status_code": 500
}
def _convert_tools_to_zhipu_format(self, tools):
if not tools:
return None
zhipu_tools = []
for tool in tools:
# Check if already in ZhipuAI/OpenAI format
if 'type' in tool and tool['type'] == 'function':
zhipu_tools.append(tool)
else:
# Convert from Claude format
zhipu_tools.append({
"type": "function",
"function": {
"name": tool.get("name"),
"description": tool.get("description"),
"parameters": tool.get("input_schema", {})
}
})
return zhipu_tools
def _convert_messages_to_zhipu_format(self, messages):
if not messages:
return []
zhipu_messages = []
for msg in messages:
role = msg.get("role")
content = msg.get("content")
# Handle string content (already in correct format)
if isinstance(content, str):
zhipu_messages.append(msg)
continue
# Handle list content (Claude format with content blocks)
if isinstance(content, list):
# Check if this is a tool result message (user role with tool_result blocks)
if role == "user" and any(block.get("type") == "tool_result" for block in content):
# Convert each tool_result block to a separate tool message
for block in content:
if block.get("type") == "tool_result":
zhipu_messages.append({
"role": "tool",
"tool_call_id": block.get("tool_use_id"),
"content": block.get("content", "")
})
# Check if this is an assistant message with tool_use blocks
elif role == "assistant":
# Separate text content and tool_use blocks
text_parts = []
tool_calls = []
for block in content:
if block.get("type") == "text":
text_parts.append(block.get("text", ""))
elif block.get("type") == "tool_use":
tool_calls.append({
"id": block.get("id"),
"type": "function",
"function": {
"name": block.get("name"),
"arguments": json.dumps(block.get("input", {}))
}
})
# Build ZhipuAI format assistant message
zhipu_msg = {
"role": "assistant",
"content": " ".join(text_parts) if text_parts else None
}
if tool_calls:
zhipu_msg["tool_calls"] = tool_calls
zhipu_messages.append(zhipu_msg)
else:
# Other list content, keep as is
zhipu_messages.append(msg)
else:
# Other formats, keep as is
zhipu_messages.append(msg)
return zhipu_messages
def _convert_tool_calls_to_openai_format(self, tool_calls):
if not tool_calls:
return None
openai_tool_calls = []
for tool_call in tool_calls:
openai_tool_calls.append({
"id": tool_call.id,
"type": "function",
"function": {
"name": tool_call.function.name,
"arguments": tool_call.function.arguments
}
})
return openai_tool_calls | --- +++ @@ -93,6 +93,13 @@ return reply
def reply_text(self, session: ZhipuAISession, args=None, retry_count=0) -> dict:
+ """
+ Call ZhipuAI API to get the answer
+ :param session: a conversation session
+ :param args: request arguments
+ :param retry_count: retry count
+ :return: {}
+ """
try:
if args is None:
args = self.args
@@ -143,6 +150,24 @@ return result
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
+ """
+ Call ZhipuAI API with tool support for agent integration
+
+ This method handles:
+ 1. Format conversion (Claude format → ZhipuAI format)
+ 2. System prompt injection
+ 3. API calling with ZhipuAI SDK
+ 4. Tool stream support (tool_stream=True for GLM-4.7)
+
+ Args:
+ messages: List of messages (may be in Claude format from agent)
+ tools: List of tool definitions (may be in Claude format from agent)
+ stream: Whether to use streaming
+ **kwargs: Additional parameters (max_tokens, temperature, system, etc.)
+
+ Returns:
+ Formatted response or generator for streaming
+ """
try:
# Convert messages from Claude format to ZhipuAI format
messages = self._convert_messages_to_zhipu_format(messages)
@@ -214,6 +239,7 @@ }
def _handle_sync_response(self, request_params):
+ """Handle synchronous ZhipuAI API response"""
try:
response = self.client.chat.completions.create(**request_params)
@@ -250,6 +276,7 @@ }
def _handle_stream_response(self, request_params):
+ """Handle streaming ZhipuAI API response"""
try:
stream = self.client.chat.completions.create(**request_params)
@@ -320,6 +347,12 @@ }
def _convert_tools_to_zhipu_format(self, tools):
+ """
+ Convert tools from Claude format to ZhipuAI format
+
+ Claude format: {name, description, input_schema}
+ ZhipuAI format: {type: "function", function: {name, description, parameters}}
+ """
if not tools:
return None
@@ -342,6 +375,12 @@ return zhipu_tools
def _convert_messages_to_zhipu_format(self, messages):
+ """
+ Convert messages from Claude format to ZhipuAI format
+
+ Claude uses content blocks with types like 'tool_use', 'tool_result'
+ ZhipuAI uses 'tool_calls' in assistant messages and 'tool' role for results
+ """
if not messages:
return []
@@ -408,6 +447,7 @@ return zhipu_messages
def _convert_tool_calls_to_openai_format(self, tool_calls):
+ """Convert ZhipuAI tool_calls to OpenAI format"""
if not tool_calls:
return None
@@ -422,4 +462,4 @@ }
})
- return openai_tool_calls+ return openai_tool_calls
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/zhipuai/zhipuai_bot.py |
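A minimal standalone sketch of the Claude-to-ZhipuAI tool conversion that _convert_tools_to_zhipu_format performs above; the weather tool used as input here is hypothetical.

def claude_tool_to_zhipu(tool: dict) -> dict:
    # Tools already in OpenAI/ZhipuAI shape pass through unchanged
    if tool.get("type") == "function":
        return tool
    # Claude shape {name, description, input_schema} -> ZhipuAI shape
    return {
        "type": "function",
        "function": {
            "name": tool.get("name"),
            "description": tool.get("description"),
            "parameters": tool.get("input_schema", {}),
        },
    }

weather_tool = {
    "name": "get_weather",
    "description": "Look up current weather for a city",
    "input_schema": {"type": "object", "properties": {"city": {"type": "string"}}},
}
# claude_tool_to_zhipu(weather_tool)["function"]["name"] -> "get_weather"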
Generate docstrings with parameter types | # encoding:utf-8
import copy
import json
import logging
import os
import pickle
from common.log import logger
# 将所有可用的配置项写在字典里, 请使用小写字母
# 此处的配置值无实际意义,程序不会读取此处的配置,仅用于提示格式,请将配置加入到config.json中
available_setting = {
# openai api配置
"open_ai_api_key": "", # openai api key
# openai apibase,当use_azure_chatgpt为true时,需要设置对应的api base
"open_ai_api_base": "https://api.openai.com/v1",
"claude_api_base": "https://api.anthropic.com/v1", # claude api base
"gemini_api_base": "https://generativelanguage.googleapis.com", # gemini api base
"proxy": "", # openai使用的代理
# chatgpt模型, 当use_azure_chatgpt为true时,其名称为Azure上model deployment名称
"model": "gpt-3.5-turbo", # 可选择: gpt-4o, pt-4o-mini, gpt-4-turbo, claude-3-sonnet, wenxin, moonshot, qwen-turbo, xunfei, glm-4, minimax, gemini等模型,全部可选模型详见common/const.py文件
"bot_type": "", # 可选配置,使用兼容openai格式的三方服务时候,需填"openai"(历史值"chatGPT"仍兼容)。bot具体名称详见common/const.py文件,如不填根据model名称判断
"use_azure_chatgpt": False, # 是否使用azure的chatgpt
"azure_deployment_id": "", # azure 模型部署名称
"azure_api_version": "", # azure api版本
# Bot触发配置
"single_chat_prefix": ["bot", "@bot"], # 私聊时文本需要包含该前缀才能触发机器人回复
"single_chat_reply_prefix": "[bot] ", # 私聊时自动回复的前缀,用于区分真人
"single_chat_reply_suffix": "", # 私聊时自动回复的后缀,\n 可以换行
"group_chat_prefix": ["@bot"], # 群聊时包含该前缀则会触发机器人回复
"no_need_at": False, # 群聊回复时是否不需要艾特
"group_chat_reply_prefix": "", # 群聊时自动回复的前缀
"group_chat_reply_suffix": "", # 群聊时自动回复的后缀,\n 可以换行
"group_chat_keyword": [], # 群聊时包含该关键词则会触发机器人回复
"group_at_off": False, # 是否关闭群聊时@bot的触发
"group_name_white_list": ["ChatGPT测试群", "ChatGPT测试群2"], # 开启自动回复的群名称列表
"group_name_keyword_white_list": [], # 开启自动回复的群名称关键词列表
"group_chat_in_one_session": ["ChatGPT测试群"], # 支持会话上下文共享的群名称
"group_shared_session": False, # 群聊是否共享会话上下文(所有成员共享)。False时每个用户在群内有独立会话
"nick_name_black_list": [], # 用户昵称黑名单
"group_welcome_msg": "", # 配置新人进群固定欢迎语,不配置则使用随机风格欢迎
"trigger_by_self": False, # 是否允许机器人触发
"text_to_image": "dall-e-2", # 图片生成模型,可选 dall-e-2, dall-e-3
# Azure OpenAI dall-e-3 配置
"dalle3_image_style": "vivid", # 图片生成dalle3的风格,可选有 vivid, natural
"dalle3_image_quality": "hd", # 图片生成dalle3的质量,可选有 standard, hd
# Azure OpenAI DALL-E API 配置, 当use_azure_chatgpt为true时,用于将文字回复的资源和Dall-E的资源分开.
"azure_openai_dalle_api_base": "", # [可选] azure openai 用于回复图片的资源 endpoint,默认使用 open_ai_api_base
"azure_openai_dalle_api_key": "", # [可选] azure openai 用于回复图片的资源 key,默认使用 open_ai_api_key
"azure_openai_dalle_deployment_id":"", # [可选] azure openai 用于回复图片的资源 deployment id,默认使用 text_to_image
"image_proxy": True, # 是否需要图片代理,国内访问LinkAI时需要
"image_create_prefix": ["画", "看", "找"], # 开启图片回复的前缀
"concurrency_in_session": 1, # 同一会话最多有多少条消息在处理中,大于1可能乱序
"image_create_size": "256x256", # 图片大小,可选有 256x256, 512x512, 1024x1024 (dall-e-3默认为1024x1024)
"group_chat_exit_group": False,
# chatgpt会话参数
"expires_in_seconds": 3600, # 无操作会话的过期时间
# 人格描述
"character_desc": "你是ChatGPT, 一个由OpenAI训练的大型语言模型, 你旨在回答并解决人们的任何问题,并且可以使用多种语言与人交流。",
"conversation_max_tokens": 1000, # 支持上下文记忆的最多字符数
# chatgpt限流配置
"rate_limit_chatgpt": 20, # chatgpt的调用频率限制
"rate_limit_dalle": 50, # openai dalle的调用频率限制
# chatgpt api参数 参考https://platform.openai.com/docs/api-reference/chat/create
"temperature": 0.9,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
"request_timeout": 180, # chatgpt请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": 120, # chatgpt重试超时时间,在这个时间内,将会自动重试
# Baidu 文心一言参数
"baidu_wenxin_model": "eb-instant", # 默认使用ERNIE-Bot-turbo模型
"baidu_wenxin_api_key": "", # Baidu api key
"baidu_wenxin_secret_key": "", # Baidu secret key
"baidu_wenxin_prompt_enabled": False, # Enable prompt if you are using ernie character model
# 讯飞星火API
"xunfei_app_id": "", # 讯飞应用ID
"xunfei_api_key": "", # 讯飞 API key
"xunfei_api_secret": "", # 讯飞 API secret
"xunfei_domain": "", # 讯飞模型对应的domain参数,Spark4.0 Ultra为 4.0Ultra,其他模型详见: https://www.xfyun.cn/doc/spark/Web.html
"xunfei_spark_url": "", # 讯飞模型对应的请求地址,Spark4.0 Ultra为 wss://spark-api.xf-yun.com/v4.0/chat,其他模型参考详见: https://www.xfyun.cn/doc/spark/Web.html
# claude 配置
"claude_api_cookie": "",
"claude_uuid": "",
# claude api key
"claude_api_key": "",
# 通义千问API, 获取方式查看文档 https://help.aliyun.com/document_detail/2587494.html
"qwen_access_key_id": "",
"qwen_access_key_secret": "",
"qwen_agent_key": "",
"qwen_app_id": "",
"qwen_node_id": "", # 流程编排模型用到的id,如果没有用到qwen_node_id,请务必保持为空字符串
# 阿里灵积(通义新版sdk)模型api key
"dashscope_api_key": "",
# Google Gemini Api Key
"gemini_api_key": "",
# 语音设置
"speech_recognition": True, # 是否开启语音识别
"group_speech_recognition": False, # 是否开启群组语音识别
"voice_reply_voice": False, # 是否使用语音回复语音,需要设置对应语音合成引擎的api key
"always_reply_voice": False, # 是否一直使用语音回复
"voice_to_text": "openai", # 语音识别引擎,支持openai,baidu,google,azure,xunfei,ali
"text_to_voice": "openai", # 语音合成引擎,支持openai,baidu,google,azure,xunfei,ali,pytts(offline),elevenlabs,edge(online)
"text_to_voice_model": "tts-1",
"tts_voice_id": "alloy",
# baidu 语音api配置, 使用百度语音识别和语音合成时需要
"baidu_app_id": "",
"baidu_api_key": "",
"baidu_secret_key": "",
# 1536普通话(支持简单的英文识别) 1737英语 1637粤语 1837四川话 1936普通话远场
"baidu_dev_pid": 1536,
# azure 语音api配置, 使用azure语音识别和语音合成时需要
"azure_voice_api_key": "",
"azure_voice_region": "japaneast",
# elevenlabs 语音api配置
"xi_api_key": "", # 获取ap的方法可以参考https://docs.elevenlabs.io/api-reference/quick-start/authentication
"xi_voice_id": "", # ElevenLabs提供了9种英式、美式等英语发音id,分别是“Adam/Antoni/Arnold/Bella/Domi/Elli/Josh/Rachel/Sam”
# 服务时间限制
"chat_time_module": False, # 是否开启服务时间限制
"chat_start_time": "00:00", # 服务开始时间
"chat_stop_time": "24:00", # 服务结束时间
# 翻译api
"translate": "baidu", # 翻译api,支持baidu
# baidu翻译api的配置
"baidu_translate_app_id": "", # 百度翻译api的appid
"baidu_translate_app_key": "", # 百度翻译api的秘钥
# wechatmp的配置
"wechatmp_token": "", # 微信公众平台的Token
"wechatmp_port": 8080, # 微信公众平台的端口,需要端口转发到80或443
"wechatmp_app_id": "", # 微信公众平台的appID
"wechatmp_app_secret": "", # 微信公众平台的appsecret
"wechatmp_aes_key": "", # 微信公众平台的EncodingAESKey,加密模式需要
# wechatcom的通用配置
"wechatcom_corp_id": "", # 企业微信公司的corpID
# wechatcomapp的配置
"wechatcomapp_token": "", # 企业微信app的token
"wechatcomapp_port": 9898, # 企业微信app的服务端口,不需要端口转发
"wechatcomapp_secret": "", # 企业微信app的secret
"wechatcomapp_agent_id": "", # 企业微信app的agent_id
"wechatcomapp_aes_key": "", # 企业微信app的aes_key
# 飞书配置
"feishu_port": 80, # 飞书bot监听端口
"feishu_app_id": "", # 飞书机器人应用APP Id
"feishu_app_secret": "", # 飞书机器人APP secret
"feishu_token": "", # 飞书 verification token
"feishu_bot_name": "", # 飞书机器人的名字
"feishu_event_mode": "websocket", # 飞书事件接收模式: webhook(HTTP服务器) 或 websocket(长连接)
# 钉钉配置
"dingtalk_client_id": "", # 钉钉机器人Client ID
"dingtalk_client_secret": "", # 钉钉机器人Client Secret
"dingtalk_card_enabled": False,
# 企微智能机器人配置(长连接模式)
"wecom_bot_id": "", # 企微智能机器人BotID
"wecom_bot_secret": "", # 企微智能机器人长连接Secret
# chatgpt指令自定义触发词
"clear_memory_commands": ["#清除记忆"], # 重置会话指令,必须以#开头
# channel配置
"channel_type": "", # 通道类型,支持多渠道同时运行。单个: "feishu",多个: "feishu, dingtalk" 或 ["feishu", "dingtalk"]。可选值: web,feishu,dingtalk,wecom_bot,wechatmp,wechatmp_service,wechatcom_app
"web_console": True, # 是否自动启动Web控制台(默认启动)。设为False可禁用
"subscribe_msg": "", # 订阅消息, 支持: wechatmp, wechatmp_service, wechatcom_app
"debug": False, # 是否开启debug模式,开启后会打印更多日志
"appdata_dir": "", # 数据目录
# 插件配置
"plugin_trigger_prefix": "$", # 规范插件提供聊天相关指令的前缀,建议不要和管理员指令前缀"#"冲突
# 是否使用全局插件配置
"use_global_plugin_config": False,
"max_media_send_count": 3, # 单次最大发送媒体资源的个数
"media_send_interval": 1, # 发送图片的事件间隔,单位秒
# 智谱AI 平台配置
"zhipu_ai_api_key": "",
"zhipu_ai_api_base": "https://open.bigmodel.cn/api/paas/v4",
"moonshot_api_key": "",
"moonshot_base_url": "https://api.moonshot.cn/v1",
# 豆包(火山方舟) 平台配置
"ark_api_key": "",
"ark_base_url": "https://ark.cn-beijing.volces.com/api/v3",
#魔搭社区 平台配置
"modelscope_api_key": "",
"modelscope_base_url": "https://api-inference.modelscope.cn/v1/chat/completions",
# LinkAI平台配置
"use_linkai": False,
"linkai_api_key": "",
"linkai_app_code": "",
"linkai_api_base": "https://api.link-ai.tech", # linkAI服务地址
"cloud_host": "client.link-ai.tech",
"cloud_deployment_id": "",
"minimax_api_key": "",
"Minimax_group_id": "",
"Minimax_base_url": "",
"web_port": 9899,
"agent": True, # 是否开启Agent模式
"agent_workspace": "~/cow", # agent工作空间路径,用于存储skills、memory等
"agent_max_context_tokens": 50000, # Agent模式下最大上下文tokens
"agent_max_context_turns": 30, # Agent模式下最大上下文记忆轮次
"agent_max_steps": 15, # Agent模式下单次运行最大决策步数
}
class Config(dict):
def __init__(self, d=None):
super().__init__()
if d is None:
d = {}
for k, v in d.items():
self[k] = v
# user_datas: 用户数据,key为用户名,value为用户数据,也是dict
self.user_datas = {}
def __getitem__(self, key):
# 跳过以下划线开头的注释字段
if not key.startswith("_") and key not in available_setting:
logger.warning("[Config] key '{}' not in available_setting, may not take effect".format(key))
return super().__getitem__(key)
def __setitem__(self, key, value):
# 跳过以下划线开头的注释字段
if not key.startswith("_") and key not in available_setting:
logger.warning("[Config] key '{}' not in available_setting, may not take effect".format(key))
return super().__setitem__(key, value)
def get(self, key, default=None):
# 跳过以下划线开头的注释字段
if key.startswith("_"):
return super().get(key, default)
# 如果key不在available_setting中,直接返回default
if key not in available_setting:
return super().get(key, default)
try:
return self[key]
except KeyError as e:
return default
except Exception as e:
raise e
# Make sure to return a dictionary to ensure atomic
def get_user_data(self, user) -> dict:
if self.user_datas.get(user) is None:
self.user_datas[user] = {}
return self.user_datas[user]
def load_user_datas(self):
try:
with open(os.path.join(get_appdata_dir(), "user_datas.pkl"), "rb") as f:
self.user_datas = pickle.load(f)
logger.debug("[Config] User datas loaded.")
except FileNotFoundError as e:
logger.debug("[Config] User datas file not found, ignore.")
except Exception as e:
logger.warning("[Config] User datas error: {}".format(e))
self.user_datas = {}
def save_user_datas(self):
try:
with open(os.path.join(get_appdata_dir(), "user_datas.pkl"), "wb") as f:
pickle.dump(self.user_datas, f)
logger.info("[Config] User datas saved.")
except Exception as e:
logger.info("[Config] User datas error: {}".format(e))
config = Config()
def drag_sensitive(config):
try:
if isinstance(config, str):
conf_dict: dict = json.loads(config)
conf_dict_copy = copy.deepcopy(conf_dict)
for key in conf_dict_copy:
if "key" in key or "secret" in key:
if isinstance(conf_dict_copy[key], str):
conf_dict_copy[key] = conf_dict_copy[key][0:3] + "*" * 5 + conf_dict_copy[key][-3:]
return json.dumps(conf_dict_copy, indent=4)
elif isinstance(config, dict):
config_copy = copy.deepcopy(config)
for key in config:
if "key" in key or "secret" in key:
if isinstance(config_copy[key], str):
config_copy[key] = config_copy[key][0:3] + "*" * 5 + config_copy[key][-3:]
return config_copy
except Exception as e:
logger.exception(e)
return config
return config
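# Illustrative only (hypothetical key value): drag_sensitive({"open_ai_api_key": "sk-1234567890"})
# returns {"open_ai_api_key": "sk-*****890"}, i.e. first 3 characters, 5 asterisks, last 3 characters.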
def load_config():
global config
# 打印 ASCII Logo
logger.info(" ____ _ _ ")
logger.info(" / ___|_____ __ / \\ __ _ ___ _ __ | |_ ")
logger.info("| | / _ \\ \\ /\\ / // _ \\ / _` |/ _ \\ '_ \\| __|")
logger.info("| |__| (_) \\ V V // ___ \\ (_| | __/ | | | |_ ")
logger.info(" \\____\\___/ \\_/\\_//_/ \\_\\__, |\\___|_| |_|\\__|")
logger.info(" |___/ ")
logger.info("")
config_path = "./config.json"
if not os.path.exists(config_path):
logger.info("配置文件不存在,将使用config-template.json模板")
config_path = "./config-template.json"
config_str = read_file(config_path)
logger.debug("[INIT] config str: {}".format(drag_sensitive(config_str)))
# 将json字符串反序列化为dict类型
config = Config(json.loads(config_str))
# override config with environment variables.
# Some online deployment platforms (e.g. Railway) deploy project from github directly. So you shouldn't put your secrets like api key in a config file, instead use environment variables to override the default config.
for name, value in os.environ.items():
name = name.lower()
# 跳过以下划线开头的注释字段
if name.startswith("_"):
continue
if name in available_setting:
logger.info("[INIT] override config by environ args: {}={}".format(name, value))
try:
config[name] = eval(value)
except Exception:
if value == "false":
config[name] = False
elif value == "true":
config[name] = True
else:
config[name] = value
if config.get("debug", False):
logger.setLevel(logging.DEBUG)
logger.debug("[INIT] set log level to DEBUG")
logger.info("[INIT] load config: {}".format(drag_sensitive(config)))
# 打印系统初始化信息
logger.info("[INIT] ========================================")
logger.info("[INIT] System Initialization")
logger.info("[INIT] ========================================")
logger.info("[INIT] Channel: {}".format(config.get("channel_type", "unknown")))
logger.info("[INIT] Model: {}".format(config.get("model", "unknown")))
# Agent模式信息
if config.get("agent", False):
workspace = config.get("agent_workspace", "~/cow")
logger.info("[INIT] Mode: Agent (workspace: {})".format(workspace))
else:
logger.info("[INIT] Mode: Chat (在config.json中设置 \"agent\":true 可启用Agent模式)")
logger.info("[INIT] Debug: {}".format(config.get("debug", False)))
logger.info("[INIT] ========================================")
# Sync selected config values to environment variables so that
# subprocesses (e.g. shell skill scripts) can access them directly.
# Existing env vars are NOT overwritten (env takes precedence).
_CONFIG_TO_ENV = {
"open_ai_api_key": "OPENAI_API_KEY",
"open_ai_api_base": "OPENAI_API_BASE",
"linkai_api_key": "LINKAI_API_KEY",
"linkai_api_base": "LINKAI_API_BASE",
"claude_api_key": "CLAUDE_API_KEY",
"claude_api_base": "CLAUDE_API_BASE",
"gemini_api_key": "GEMINI_API_KEY",
"gemini_api_base": "GEMINI_API_BASE",
"minimax_api_key": "MINIMAX_API_KEY",
"minimax_api_base": "MINIMAX_API_BASE",
"zhipu_ai_api_key": "ZHIPU_AI_API_KEY",
"zhipu_ai_api_base": "ZHIPU_AI_API_BASE",
"moonshot_api_key": "MOONSHOT_API_KEY",
"moonshot_api_base": "MOONSHOT_API_BASE",
"ark_api_key": "ARK_API_KEY",
"ark_api_base": "ARK_API_BASE",
# Channel credentials (used by skills that check env vars)
"feishu_app_id": "FEISHU_APP_ID",
"feishu_app_secret": "FEISHU_APP_SECRET",
"dingtalk_client_id": "DINGTALK_CLIENT_ID",
"dingtalk_client_secret": "DINGTALK_CLIENT_SECRET",
"wechatmp_app_id": "WECHATMP_APP_ID",
"wechatmp_app_secret": "WECHATMP_APP_SECRET",
"wechatcomapp_agent_id": "WECHATCOMAPP_AGENT_ID",
"wechatcomapp_secret": "WECHATCOMAPP_SECRET",
"qq_app_id": "QQ_APP_ID",
"qq_app_secret": "QQ_APP_SECRET"
}
injected = 0
for conf_key, env_key in _CONFIG_TO_ENV.items():
if env_key not in os.environ:
val = config.get(conf_key, "")
if val:
os.environ[env_key] = str(val)
injected += 1
if injected:
logger.info("[INIT] Synced {} config values to environment variables".format(injected))
config.load_user_datas()
def get_root():
return os.path.dirname(os.path.abspath(__file__))
def read_file(path):
with open(path, mode="r", encoding="utf-8") as f:
return f.read()
def conf():
return config
def get_appdata_dir():
data_path = os.path.join(get_root(), conf().get("appdata_dir", ""))
if not os.path.exists(data_path):
logger.info("[INIT] data path not exists, create it: {}".format(data_path))
os.makedirs(data_path)
return data_path
def subscribe_msg():
trigger_prefix = conf().get("single_chat_prefix", [""])[0]
msg = conf().get("subscribe_msg", "")
return msg.format(trigger_prefix=trigger_prefix)
# global plugin config
plugin_config = {}
def write_plugin_config(pconf: dict):
global plugin_config
for k in pconf:
plugin_config[k.lower()] = pconf[k]
def remove_plugin_config(name: str):
global plugin_config
plugin_config.pop(name.lower(), None)
def pconf(plugin_name: str) -> dict:
return plugin_config.get(plugin_name.lower())
# 全局配置,用于存放全局生效的状态
global_config = {"admin_users": []} | --- +++ @@ -429,18 +429,31 @@
def write_plugin_config(pconf: dict):
+ """
+ 写入插件全局配置
+ :param pconf: 全量插件配置
+ """
global plugin_config
for k in pconf:
plugin_config[k.lower()] = pconf[k]
def remove_plugin_config(name: str):
+ """
+ 移除待重新加载的插件全局配置
+ :param name: 待重载的插件名
+ """
global plugin_config
plugin_config.pop(name.lower(), None)
def pconf(plugin_name: str) -> dict:
+ """
+ 根据插件名称获取配置
+ :param plugin_name: 插件名称
+ :return: 该插件的配置项
+ """
return plugin_config.get(plugin_name.lower())
# 全局配置,用于存放全局生效的状态
-global_config = {"admin_users": []}
+global_config = {"admin_users": []}
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/config.py |
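A small sketch of the environment-variable override rule applied in load_config above: names are lower-cased, only keys listed in available_setting are accepted, and values are eval'd with a plain-string fallback for "true"/"false". The variable names in the final comment are examples, not part of config.py.

import os

def override_from_env(cfg: dict, allowed: set) -> None:
    for name, value in os.environ.items():
        key = name.lower()
        if key.startswith("_") or key not in allowed:
            continue
        try:
            cfg[key] = eval(value)  # "9899" -> 9899, "True" -> True
        except Exception:
            cfg[key] = {"true": True, "false": False}.get(value, value)

# Example: with WEB_PORT=9899 and DEBUG=true set in the environment,
# override_from_env(cfg, set(available_setting)) yields cfg["web_port"] == 9899 and cfg["debug"] is True.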
Create simple docstrings for beginners | # encoding:utf-8
import base64
import json
import mimetypes
import os
import re
import time
import requests
from models.bot import Bot
from models.session_manager import SessionManager
from bridge.context import ContextType, Context
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf
from models.chatgpt.chat_gpt_session import ChatGPTSession
from models.baidu.baidu_wenxin_session import BaiduWenxinSession
# OpenAI对话模型API (可用)
class GoogleGeminiBot(Bot):
def __init__(self):
super().__init__()
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
self.model = conf().get("model") or "gemini-1.5-flash"  # model name referenced later by reply() and call_with_tools(); default is an assumed fallback
@property
def api_key(self):
return conf().get("gemini_api_key")
@property
def api_base(self):
base = conf().get("gemini_api_base", "").strip()
if base:
return base.rstrip('/')
return "https://generativelanguage.googleapis.com"
def reply(self, query, context: Context = None) -> Reply:
session_id = None
try:
if context.type != ContextType.TEXT:
logger.warn(f"[Gemini] Unsupported message type, type={context.type}")
return Reply(ReplyType.TEXT, None)
logger.info(f"[Gemini] query={query}")
session_id = context["session_id"]
session = self.sessions.session_query(query, session_id)
filtered_messages = self.filter_messages(session.messages)
logger.debug(f"[Gemini] messages={filtered_messages}")
response = self.call_with_tools(
messages=filtered_messages,
tools=None,
stream=False,
model=self.model
)
if isinstance(response, dict) and response.get("error"):
error_message = response.get("message", "Failed to invoke [Gemini] api!")
logger.error(f"[Gemini] API error: {error_message}")
self.sessions.session_reply(error_message, session_id)
return Reply(ReplyType.ERROR, error_message)
choices = response.get("choices", []) if isinstance(response, dict) else []
if choices and choices[0].get("message"):
reply_text = choices[0]["message"].get("content")
if reply_text:
logger.info(f"[Gemini] reply={reply_text}")
self.sessions.session_reply(reply_text, session_id)
return Reply(ReplyType.TEXT, reply_text)
logger.warning("[Gemini] No valid response generated. Checking safety ratings.")
safety_ratings = response.get("safety_ratings", []) if isinstance(response, dict) else []
if safety_ratings:
for rating in safety_ratings:
category = rating.get("category", "UNKNOWN")
probability = rating.get("probability", "UNKNOWN")
logger.warning(f"[Gemini] Safety rating: {category} - {probability}")
error_message = "No valid response generated due to safety constraints."
self.sessions.session_reply(error_message, session_id)
return Reply(ReplyType.ERROR, error_message)
except Exception as e:
logger.error(f"[Gemini] Error generating response: {str(e)}", exc_info=True)
error_message = "Failed to invoke [Gemini] api!"
if session_id:
self.sessions.session_reply(error_message, session_id)
return Reply(ReplyType.ERROR, error_message)
def _convert_to_gemini_messages(self, messages: list):
res = []
for msg in messages:
if msg.get("role") == "user":
role = "user"
elif msg.get("role") == "assistant":
role = "model"
elif msg.get("role") == "system":
role = "user"
else:
continue
res.append({
"role": role,
"parts": [{"text": msg.get("content")}]
})
return res
@staticmethod
def filter_messages(messages: list):
res = []
turn = "user"
if not messages:
return res
for i in range(len(messages) - 1, -1, -1):
message = messages[i]
role = message.get("role")
if role == "system":
res.insert(0, message)
continue
if role != turn:
continue
res.insert(0, message)
if turn == "user":
turn = "assistant"
elif turn == "assistant":
turn = "user"
return res
@staticmethod
def _extract_image_paths_from_text(content: str):
if not isinstance(content, str):
return "", []
pattern = r"\[图片:\s*([^\]]+)\]"
image_paths = [m.strip().strip("'\"") for m in re.findall(pattern, content) if m.strip()]
cleaned_text = re.sub(pattern, "", content)
cleaned_text = re.sub(r"\n{3,}", "\n\n", cleaned_text).strip()
return cleaned_text, image_paths
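# Illustrative only (hypothetical path): given "请看 [图片: /tmp/cat.png] 这张",
# _extract_image_paths_from_text returns the text with the marker stripped out
# together with ["/tmp/cat.png"]; quotes and surrounding whitespace in the path are trimmed.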
@staticmethod
def _build_image_inline_part(image_path: str):
if not image_path:
return None
try:
if image_path.startswith("file://"):
image_path = image_path[7:]
image_path = os.path.expanduser(image_path)
if not os.path.exists(image_path):
logger.warning(f"[Gemini] Image file not found: {image_path}")
return None
with open(image_path, "rb") as f:
image_bytes = f.read()
mime_type = mimetypes.guess_type(image_path)[0] or "image/png"
if not mime_type.startswith("image/"):
mime_type = "image/png"
return {
"inlineData": {
"mimeType": mime_type,
"data": base64.b64encode(image_bytes).decode("utf-8")
}
}
except Exception as e:
logger.warning(f"[Gemini] Failed to build inline image part from path={image_path}, err={e}")
return None
@staticmethod
def _build_inline_part_from_image_url(image_url):
if not image_url:
return None
if isinstance(image_url, dict):
image_url = image_url.get("url")
if not image_url or not isinstance(image_url, str):
return None
if image_url.startswith("data:"):
match = re.match(r"^data:([^;]+);base64,(.+)$", image_url, re.DOTALL)
if not match:
logger.warning("[Gemini] Invalid data URL for image block")
return None
return {
"inlineData": {
"mimeType": match.group(1),
"data": match.group(2).strip()
}
}
if image_url.startswith("file://") or os.path.exists(os.path.expanduser(image_url)):
return GoogleGeminiBot._build_image_inline_part(image_url)
if image_url.startswith("http://") or image_url.startswith("https://"):
try:
response = requests.get(image_url, timeout=20)
if response.status_code != 200:
logger.warning(f"[Gemini] Failed to fetch remote image: status={response.status_code}, url={image_url}")
return None
mime_type = response.headers.get("Content-Type", "image/png").split(";")[0].strip()
if not mime_type.startswith("image/"):
mime_type = "image/png"
return {
"inlineData": {
"mimeType": mime_type,
"data": base64.b64encode(response.content).decode("utf-8")
}
}
except Exception as e:
logger.warning(f"[Gemini] Failed to download remote image: url={image_url}, err={e}")
return None
logger.warning(f"[Gemini] Unsupported image URL format: {image_url[:120]}")
return None
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
try:
model_name = kwargs.get("model", self.model or "gemini-1.5-flash")
# Build REST API payload
payload = {"contents": []}
inline_image_count = 0
# Keep legacy behavior: disable Gemini safety blocking like old SDK path.
payload["safetySettings"] = [
{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
{"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
{"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]
# Extract and set system instruction
system_prompt = kwargs.get("system", "")
if not system_prompt:
for msg in messages:
if msg.get("role") == "system":
system_prompt = msg["content"]
break
if system_prompt:
payload["system_instruction"] = {
"parts": [{"text": system_prompt}]
}
# Convert messages to Gemini format
for msg in messages:
role = msg.get("role")
content = msg.get("content", "")
if role == "system":
continue
# Convert role
gemini_role = "user" if role in ["user", "tool"] else "model"
# Handle different content formats
parts = []
if isinstance(content, str):
# Text with optional [图片: /path/to/file] markers
cleaned_text, image_paths = self._extract_image_paths_from_text(content)
if cleaned_text:
parts.append({"text": cleaned_text})
image_added = False
for image_path in image_paths:
image_part = self._build_image_inline_part(image_path)
if image_part:
parts.append(image_part)
image_added = True
inline_image_count += 1
if not cleaned_text and not image_added and content:
parts.append({"text": content})
elif isinstance(content, list):
# List of content blocks (Claude format)
for block in content:
if not isinstance(block, dict):
if isinstance(block, str):
parts.append({"text": block})
continue
block_type = block.get("type")
if block_type == "text":
# Text block with optional image markers
block_text = block.get("text", "")
cleaned_text, image_paths = self._extract_image_paths_from_text(block_text)
if cleaned_text:
parts.append({"text": cleaned_text})
for image_path in image_paths:
image_part = self._build_image_inline_part(image_path)
if image_part:
parts.append(image_part)
elif block_type in ["image", "image_url"]:
# OpenAI format: {"type":"image_url","image_url":{"url":"..."}}
# Claude format: {"type":"image","source":{"type":"base64","media_type":"...","data":"..."}}
image_part = None
if block_type == "image":
source = block.get("source", {})
if isinstance(source, dict) and source.get("type") == "base64" and source.get("data"):
image_part = {
"inlineData": {
"mimeType": source.get("media_type", "image/png"),
"data": source.get("data")
}
}
elif block.get("image_url"):
image_part = self._build_inline_part_from_image_url(block.get("image_url"))
else:
image_part = self._build_inline_part_from_image_url(block.get("image_url"))
if image_part:
parts.append(image_part)
inline_image_count += 1
else:
logger.warning(f"[Gemini] Skip invalid image block: {str(block)[:200]}")
elif block_type == "tool_result":
# Convert Claude tool_result to Gemini functionResponse
tool_use_id = block.get("tool_use_id")
tool_content = block.get("content", "")
# Try to parse tool content as JSON
try:
if isinstance(tool_content, str):
tool_result_data = json.loads(tool_content)
else:
tool_result_data = tool_content
except Exception:
tool_result_data = {"result": tool_content}
# Find the tool name from previous messages
# Look for the corresponding tool_call in model's message
tool_name = None
for prev_msg in reversed(messages):
if prev_msg.get("role") == "assistant":
prev_content = prev_msg.get("content", [])
if isinstance(prev_content, list):
for prev_block in prev_content:
if isinstance(prev_block, dict) and prev_block.get("type") == "tool_use":
if prev_block.get("id") == tool_use_id:
tool_name = prev_block.get("name")
break
if tool_name:
break
# Gemini functionResponse format
parts.append({
"functionResponse": {
"name": tool_name or "unknown",
"response": tool_result_data
}
})
elif "text" in block:
# Generic text field
parts.append({"text": block["text"]})
if parts:
payload["contents"].append({
"role": gemini_role,
"parts": parts
})
if inline_image_count > 0:
logger.info(f"[Gemini] Multimodal request includes {inline_image_count} image part(s)")
# Generation config
gen_config = {}
if kwargs.get("temperature") is not None:
gen_config["temperature"] = kwargs["temperature"]
if gen_config:
payload["generationConfig"] = gen_config
# Convert tools to Gemini format (REST API style)
if tools:
gemini_tools = self._convert_tools_to_gemini_rest_format(tools)
if gemini_tools:
payload["tools"] = gemini_tools
# Make REST API call
base_url = f"{self.api_base}/v1beta"
endpoint = f"{base_url}/models/{model_name}:generateContent"
if stream:
endpoint = f"{base_url}/models/{model_name}:streamGenerateContent?alt=sse"
headers = {
"x-goog-api-key": self.api_key,
"Content-Type": "application/json"
}
response = requests.post(
endpoint,
headers=headers,
json=payload,
stream=stream,
timeout=60
)
# Check HTTP status for stream mode (for non-stream, it's checked in handler)
if stream and response.status_code != 200:
error_text = response.text
logger.error(f"[Gemini] API error ({response.status_code}): {error_text}")
def error_generator():
yield {
"error": True,
"message": f"Gemini API error: {error_text}",
"status_code": response.status_code
}
return error_generator()
if stream:
return self._handle_gemini_rest_stream_response(response, model_name)
else:
return self._handle_gemini_rest_sync_response(response, model_name)
except Exception as e:
logger.error(f"[Gemini] call_with_tools error: {e}", exc_info=True)
error_msg = str(e) # Capture error message before creating generator
if stream:
def error_generator():
yield {
"error": True,
"message": error_msg,
"status_code": 500
}
return error_generator()
else:
return {
"error": True,
"message": str(e),
"status_code": 500
}
def _convert_tools_to_gemini_rest_format(self, tools_list):
function_declarations = []
for tool in tools_list:
# Extract name, description, and parameters based on format
if tool.get("type") == "function":
# OpenAI format: {"type": "function", "function": {...}}
func = tool.get("function", {})
name = func.get("name")
description = func.get("description", "")
parameters = func.get("parameters", {})
else:
# Claude/Agent format: {"name": "...", "description": "...", "input_schema": {...}}
name = tool.get("name")
description = tool.get("description", "")
parameters = tool.get("input_schema", {})
if not name:
logger.warning(f"[Gemini] Skipping tool without name: {tool}")
continue
function_declarations.append({
"name": name,
"description": description,
"parameters": parameters
})
# All functionDeclarations must be in a single tools object (per Gemini REST API spec)
return [{
"functionDeclarations": function_declarations
}] if function_declarations else []
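# Shape illustration (hypothetical tool, comments only): a Claude-style definition
#   {"name": "search", "description": "web search", "input_schema": {"type": "object"}}
# is emitted inside a single tools object:
#   [{"functionDeclarations": [{"name": "search", "description": "web search", "parameters": {"type": "object"}}]}]
# i.e. all declarations share one entry, as the comment above notes.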
def _handle_gemini_rest_sync_response(self, response, model_name):
try:
if response.status_code != 200:
error_text = response.text
logger.error(f"[Gemini] API error ({response.status_code}): {error_text}")
return {
"error": True,
"message": f"Gemini API error: {error_text}",
"status_code": response.status_code
}
data = response.json()
logger.debug(f"[Gemini] Response data: {json.dumps(data, ensure_ascii=False)[:500]}")
# Extract from Gemini response format
candidates = data.get("candidates", [])
if not candidates:
logger.warning("[Gemini] No candidates in response")
prompt_feedback = data.get("promptFeedback", {})
return {
"error": True,
"message": "No candidates in response",
"status_code": 500,
"safety_ratings": prompt_feedback.get("safetyRatings", [])
}
candidate = candidates[0]
content = candidate.get("content", {})
parts = content.get("parts", [])
safety_ratings = candidate.get("safetyRatings", [])
logger.debug(f"[Gemini] Candidate parts count: {len(parts)}")
# Extract text and function calls
text_content = ""
tool_calls = []
for part in parts:
# Check for text
if "text" in part:
text_content += part["text"]
logger.debug(f"[Gemini] Text part: {part['text'][:100]}...")
# Check for functionCall (per REST API docs)
if "functionCall" in part:
fc = part["functionCall"]
logger.info(f"[Gemini] Function call detected: {fc.get('name')}")
tool_calls.append({
"id": f"call_{int(time.time() * 1000000)}",
"type": "function",
"function": {
"name": fc.get("name"),
"arguments": json.dumps(fc.get("args", {}))
}
})
logger.info(f"[Gemini] Response: text={len(text_content)} chars, tool_calls={len(tool_calls)}")
# Build OpenAI format response
message_dict = {
"role": "assistant",
"content": text_content or None
}
if tool_calls:
message_dict["tool_calls"] = tool_calls
return {
"id": f"chatcmpl-{time.time()}",
"object": "chat.completion",
"created": int(time.time()),
"model": model_name,
"choices": [{
"index": 0,
"message": message_dict,
"finish_reason": "tool_calls" if tool_calls else "stop"
}],
"usage": data.get("usageMetadata", {}),
"safety_ratings": safety_ratings
}
except Exception as e:
logger.error(f"[Gemini] sync response error: {e}", exc_info=True)
return {
"error": True,
"message": str(e),
"status_code": 500
}
def _handle_gemini_rest_stream_response(self, response, model_name):
try:
all_tool_calls = []
has_sent_tool_calls = False
has_content = False # Track if any content was sent
chunk_count = 0
last_finish_reason = None
last_safety_ratings = None
for line in response.iter_lines():
if not line:
continue
line = line.decode('utf-8')
# Skip SSE prefixes
if line.startswith('data: '):
line = line[6:]
if not line or line == '[DONE]':
continue
try:
chunk_data = json.loads(line)
chunk_count += 1
candidates = chunk_data.get("candidates", [])
if not candidates:
logger.debug("[Gemini] No candidates in chunk")
continue
candidate = candidates[0]
# 记录 finish_reason 和 safety_ratings
if "finishReason" in candidate:
last_finish_reason = candidate["finishReason"]
if "safetyRatings" in candidate:
last_safety_ratings = candidate["safetyRatings"]
content = candidate.get("content", {})
parts = content.get("parts", [])
if not parts:
logger.debug("[Gemini] No parts in candidate content")
# Stream text content
for part in parts:
if "text" in part and part["text"]:
has_content = True
yield {
"id": f"chatcmpl-{time.time()}",
"object": "chat.completion.chunk",
"created": int(time.time()),
"model": model_name,
"choices": [{
"index": 0,
"delta": {"content": part["text"]},
"finish_reason": None
}]
}
# Collect function calls
if "functionCall" in part:
fc = part["functionCall"]
logger.info(f"[Gemini] Function call: {fc.get('name')}")
all_tool_calls.append({
"index": len(all_tool_calls), # Add index to differentiate multiple tool calls
"id": f"call_{int(time.time() * 1000000)}_{len(all_tool_calls)}",
"type": "function",
"function": {
"name": fc.get("name"),
"arguments": json.dumps(fc.get("args", {}))
}
})
except json.JSONDecodeError as je:
logger.debug(f"[Gemini] JSON decode error: {je}")
continue
# Send tool calls if any were collected
if all_tool_calls and not has_sent_tool_calls:
yield {
"id": f"chatcmpl-{time.time()}",
"object": "chat.completion.chunk",
"created": int(time.time()),
"model": model_name,
"choices": [{
"index": 0,
"delta": {"tool_calls": all_tool_calls},
"finish_reason": None
}]
}
has_sent_tool_calls = True
# 如果返回空响应,记录详细警告
if not has_content and not all_tool_calls:
logger.warning(f"[Gemini] ⚠️ Empty response detected!")
# Final chunk
yield {
"id": f"chatcmpl-{time.time()}",
"object": "chat.completion.chunk",
"created": int(time.time()),
"model": model_name,
"choices": [{
"index": 0,
"delta": {},
"finish_reason": "tool_calls" if all_tool_calls else "stop"
}]
}
except Exception as e:
logger.error(f"[Gemini] stream response error: {e}", exc_info=True)
error_msg = str(e)
yield {
"error": True,
"message": error_msg,
"status_code": 500
}
def _convert_tools_to_gemini_format(self, openai_tools):
import google.generativeai as genai
gemini_functions = []
for tool in openai_tools:
if tool.get("type") == "function":
func = tool.get("function", {})
gemini_functions.append(
genai.protos.FunctionDeclaration(
name=func.get("name"),
description=func.get("description", ""),
parameters=func.get("parameters", {})
)
)
if gemini_functions:
return [genai.protos.Tool(function_declarations=gemini_functions)]
return None
def _handle_gemini_sync_response(self, model, messages, request_params, model_name):
import json
response = model.generate_content(messages, **request_params)
# Extract text content and function calls
text_content = ""
tool_calls = []
if response.candidates and response.candidates[0].content:
for part in response.candidates[0].content.parts:
if hasattr(part, 'text') and part.text:
text_content += part.text
elif hasattr(part, 'function_call') and part.function_call:
# Convert Gemini function call to OpenAI format
func_call = part.function_call
tool_calls.append({
"id": f"call_{hash(func_call.name)}",
"type": "function",
"function": {
"name": func_call.name,
"arguments": json.dumps(dict(func_call.args))
}
})
# Build message in OpenAI format
message = {
"role": "assistant",
"content": text_content
}
if tool_calls:
message["tool_calls"] = tool_calls
# Format response to match OpenAI structure
formatted_response = {
"id": f"gemini_{int(time.time())}",
"object": "chat.completion",
"created": int(time.time()),
"model": model_name,
"choices": [
{
"index": 0,
"message": message,
"finish_reason": "stop" if not tool_calls else "tool_calls"
}
],
"usage": {
"prompt_tokens": 0, # Gemini doesn't provide token counts in the same way
"completion_tokens": 0,
"total_tokens": 0
}
}
logger.info(f"[Gemini] call_with_tools reply, model={model_name}")
return formatted_response
def _handle_gemini_stream_response(self, model, messages, request_params, model_name):
import json
try:
response_stream = model.generate_content(messages, stream=True, **request_params)
for chunk in response_stream:
if chunk.candidates and chunk.candidates[0].content:
for part in chunk.candidates[0].content.parts:
if hasattr(part, 'text') and part.text:
# Text content
yield {
"id": f"gemini_{int(time.time())}",
"object": "chat.completion.chunk",
"created": int(time.time()),
"model": model_name,
"choices": [{
"index": 0,
"delta": {"content": part.text},
"finish_reason": None
}]
}
elif hasattr(part, 'function_call') and part.function_call:
# Function call
func_call = part.function_call
yield {
"id": f"gemini_{int(time.time())}",
"object": "chat.completion.chunk",
"created": int(time.time()),
"model": model_name,
"choices": [{
"index": 0,
"delta": {
"tool_calls": [{
"index": 0,
"id": f"call_{hash(func_call.name)}",
"type": "function",
"function": {
"name": func_call.name,
"arguments": json.dumps(dict(func_call.args))
}
}]
},
"finish_reason": None
}]
}
except Exception as e:
logger.error(f"[Gemini] stream response error: {e}")
yield {
"error": True,
"message": str(e),
"status_code": 500
} | ---
+++
@@ -1,3 +1,9 @@
+"""
+Google gemini bot
+
+@author zhayujie
+@Date 2023/12/15
+"""
# encoding:utf-8
import base64
@@ -213,6 +219,18 @@
return None
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
+ """
+ Call Gemini API with tool support using REST API (following official docs)
+
+ Args:
+ messages: List of messages (OpenAI format)
+ tools: List of tool definitions (OpenAI/Claude format)
+ stream: Whether to use streaming
+ **kwargs: Additional parameters (system, max_tokens, temperature, etc.)
+
+ Returns:
+ Formatted response compatible with OpenAI format or generator for streaming
+ """
try:
model_name = kwargs.get("model", self.model or "gemini-1.5-flash")
@@ -434,6 +452,12 @@
}
def _convert_tools_to_gemini_rest_format(self, tools_list):
+ """
+ Convert tools to Gemini REST API format
+
+ Handles both OpenAI and Claude/Agent formats.
+ Returns: [{"functionDeclarations": [...]}]
+ """
function_declarations = []
for tool in tools_list:
@@ -466,6 +490,7 @@
}] if function_declarations else []
def _handle_gemini_rest_sync_response(self, response, model_name):
+ """Handle Gemini REST API sync response and convert to OpenAI format"""
try:
if response.status_code != 200:
error_text = response.text
@@ -555,6 +580,7 @@
}
def _handle_gemini_rest_stream_response(self, response, model_name):
+ """Handle Gemini REST API stream response"""
try:
all_tool_calls = []
has_sent_tool_calls = False
@@ -675,6 +701,7 @@
}
def _convert_tools_to_gemini_format(self, openai_tools):
+ """Convert OpenAI tool format to Gemini function declarations"""
import google.generativeai as genai
gemini_functions = []
@@ -694,6 +721,7 @@
return None
def _handle_gemini_sync_response(self, model, messages, request_params, model_name):
+ """Handle synchronous Gemini API response"""
import json
response = model.generate_content(messages, **request_params)
@@ -750,6 +778,7 @@
return formatted_response
def _handle_gemini_stream_response(self, model, messages, request_params, model_name):
+ """Handle streaming Gemini API response"""
import json
try:
@@ -802,4 +831,4 @@
"error": True,
"message": str(e),
"status_code": 500
- }
+ }
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/gemini/google_gemini_bot.py |
Auto-generate documentation strings for this file | from enum import Enum
from config import conf
from common.log import logger
import requests
import threading
import time
from bridge.reply import Reply, ReplyType
import asyncio
from bridge.context import ContextType
from plugins import EventContext, EventAction
from .utils import Util
INVALID_REQUEST = 410
NOT_FOUND_ORIGIN_IMAGE = 461
NOT_FOUND_TASK = 462
class TaskType(Enum):
GENERATE = "generate"
UPSCALE = "upscale"
VARIATION = "variation"
RESET = "reset"
def __str__(self):
return self.name
class Status(Enum):
PENDING = "pending"
FINISHED = "finished"
EXPIRED = "expired"
ABORTED = "aborted"
def __str__(self):
return self.name
class TaskMode(Enum):
FAST = "fast"
RELAX = "relax"
task_name_mapping = {
TaskType.GENERATE.name: "生成",
TaskType.UPSCALE.name: "放大",
TaskType.VARIATION.name: "变换",
TaskType.RESET.name: "重新生成",
}
class MJTask:
def __init__(self, id, user_id: str, task_type: TaskType, raw_prompt=None, expires: int = 60 * 6,
status=Status.PENDING):
self.id = id
self.user_id = user_id
self.task_type = task_type
self.raw_prompt = raw_prompt
self.send_func = None # send_func(img_url)
self.expiry_time = time.time() + expires
self.status = status
self.img_url = None # url
self.img_id = None
def __str__(self):
return f"id={self.id}, user_id={self.user_id}, task_type={self.task_type}, status={self.status}, img_id={self.img_id}"
# midjourney bot
class MJBot:
def __init__(self, config, fetch_group_app_code):
self.base_url = conf().get("linkai_api_base", "https://api.link-ai.tech") + "/v1/img/midjourney"
self.headers = {"Authorization": "Bearer " + conf().get("linkai_api_key")}
self.config = config
self.fetch_group_app_code = fetch_group_app_code
self.tasks = {}
self.temp_dict = {}
self.tasks_lock = threading.Lock()
self.event_loop = asyncio.new_event_loop()
def judge_mj_task_type(self, e_context: EventContext):
if not self.config:
return None
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
context = e_context['context']
if context.type == ContextType.TEXT:
cmd_list = context.content.split(maxsplit=1)
if not cmd_list:
return None
if cmd_list[0].lower() == f"{trigger_prefix}mj":
return TaskType.GENERATE
elif cmd_list[0].lower() == f"{trigger_prefix}mju":
return TaskType.UPSCALE
elif cmd_list[0].lower() == f"{trigger_prefix}mjv":
return TaskType.VARIATION
elif cmd_list[0].lower() == f"{trigger_prefix}mjr":
return TaskType.RESET
elif context.type == ContextType.IMAGE_CREATE and self.config.get("use_image_create_prefix") and self._is_mj_open(context):
return TaskType.GENERATE
def process_mj_task(self, mj_type: TaskType, e_context: EventContext):
context = e_context['context']
session_id = context["session_id"]
cmd = context.content.split(maxsplit=1)
if len(cmd) == 1 and context.type == ContextType.TEXT:
# midjourney 帮助指令
self._set_reply_text(self.get_help_text(verbose=True), e_context, level=ReplyType.INFO)
return
if len(cmd) == 2 and (cmd[1] == "open" or cmd[1] == "close"):
if not Util.is_admin(e_context):
Util.set_reply_text("需要管理员权限执行", e_context, level=ReplyType.ERROR)
return
# midjourney 开关指令
is_open = True
tips_text = "开启"
if cmd[1] == "close":
tips_text = "关闭"
is_open = False
self.config["enabled"] = is_open
self._set_reply_text(f"Midjourney绘画已{tips_text}", e_context, level=ReplyType.INFO)
return
if not self._is_mj_open(context):
logger.warn("Midjourney绘画未开启,请查看 plugins/linkai/config.json 中的配置,或者在LinkAI平台 应用中添加/打开”MJ“插件")
self._set_reply_text(f"Midjourney绘画未开启", e_context, level=ReplyType.INFO)
return
if not self._check_rate_limit(session_id, e_context):
logger.warn("[MJ] midjourney task exceed rate limit")
return
if mj_type == TaskType.GENERATE:
if context.type == ContextType.IMAGE_CREATE:
raw_prompt = context.content
else:
# 图片生成
raw_prompt = cmd[1]
reply = self.generate(raw_prompt, session_id, e_context)
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
elif mj_type == TaskType.UPSCALE or mj_type == TaskType.VARIATION:
# 图片放大/变换
clist = cmd[1].split()
if len(clist) < 2:
self._set_reply_text(f"{cmd[0]} 命令缺少参数", e_context)
return
img_id = clist[0]
index = int(clist[1])
if index < 1 or index > 4:
self._set_reply_text(f"图片序号 {index} 错误,应在 1 至 4 之间", e_context)
return
key = f"{str(mj_type)}_{img_id}_{index}"
if self.temp_dict.get(key):
self._set_reply_text(f"第 {index} 张图片已经{task_name_mapping.get(str(mj_type))}过了", e_context)
return
# 执行图片放大/变换操作
reply = self.do_operate(mj_type, session_id, img_id, e_context, index)
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
elif mj_type == TaskType.RESET:
# 图片重新生成
clist = cmd[1].split()
if len(clist) < 1:
self._set_reply_text(f"{cmd[0]} 命令缺少参数", e_context)
return
img_id = clist[0]
# 图片重新生成
reply = self.do_operate(mj_type, session_id, img_id, e_context)
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
else:
self._set_reply_text(f"暂不支持该命令", e_context)
def generate(self, prompt: str, user_id: str, e_context: EventContext) -> Reply:
logger.info(f"[MJ] image generate, prompt={prompt}")
mode = self._fetch_mode(prompt)
body = {"prompt": prompt, "mode": mode, "auto_translate": self.config.get("auto_translate")}
if not self.config.get("img_proxy"):
body["img_proxy"] = False
res = requests.post(url=self.base_url + "/generate", json=body, headers=self.headers, timeout=(5, 40))
if res.status_code == 200:
res = res.json()
logger.debug(f"[MJ] image generate, res={res}")
if res.get("code") == 200:
task_id = res.get("data").get("task_id")
real_prompt = res.get("data").get("real_prompt")
if mode == TaskMode.RELAX.value:
time_str = "1~10分钟"
else:
time_str = "1分钟"
content = f"🚀您的作品将在{time_str}左右完成,请耐心等待\n- - - - - - - - -\n"
if real_prompt:
content += f"初始prompt: {prompt}\n转换后prompt: {real_prompt}"
else:
content += f"prompt: {prompt}"
reply = Reply(ReplyType.INFO, content)
task = MJTask(id=task_id, status=Status.PENDING, raw_prompt=prompt, user_id=user_id,
task_type=TaskType.GENERATE)
# put to memory dict
self.tasks[task.id] = task
# asyncio.run_coroutine_threadsafe(self.check_task(task, e_context), self.event_loop)
self._do_check_task(task, e_context)
return reply
else:
res_json = res.json()
logger.error(f"[MJ] generate error, msg={res_json.get('message')}, status_code={res.status_code}")
if res.status_code == INVALID_REQUEST:
reply = Reply(ReplyType.ERROR, "图片生成失败,请检查提示词参数或内容")
else:
reply = Reply(ReplyType.ERROR, "图片生成失败,请稍后再试")
return reply
def do_operate(self, task_type: TaskType, user_id: str, img_id: str, e_context: EventContext,
index: int = None) -> Reply:
logger.info(f"[MJ] image operate, task_type={task_type}, img_id={img_id}, index={index}")
body = {"type": task_type.name, "img_id": img_id}
if index:
body["index"] = index
if not self.config.get("img_proxy"):
body["img_proxy"] = False
res = requests.post(url=self.base_url + "/operate", json=body, headers=self.headers, timeout=(5, 40))
logger.debug(res)
if res.status_code == 200:
res = res.json()
if res.get("code") == 200:
task_id = res.get("data").get("task_id")
logger.info(f"[MJ] image operate processing, task_id={task_id}")
icon_map = {TaskType.UPSCALE: "🔎", TaskType.VARIATION: "🪄", TaskType.RESET: "🔄"}
content = f"{icon_map.get(task_type)}图片正在{task_name_mapping.get(task_type.name)}中,请耐心等待"
reply = Reply(ReplyType.INFO, content)
task = MJTask(id=task_id, status=Status.PENDING, user_id=user_id, task_type=task_type)
# put to memory dict
self.tasks[task.id] = task
key = f"{task_type.name}_{img_id}_{index}"
self.temp_dict[key] = True
# asyncio.run_coroutine_threadsafe(self.check_task(task, e_context), self.event_loop)
self._do_check_task(task, e_context)
return reply
else:
error_msg = ""
if res.status_code == NOT_FOUND_ORIGIN_IMAGE:
error_msg = "请输入正确的图片ID"
res_json = res.json()
logger.error(f"[MJ] operate error, msg={res_json.get('message')}, status_code={res.status_code}")
reply = Reply(ReplyType.ERROR, error_msg or "图片生成失败,请稍后再试")
return reply
def check_task_sync(self, task: MJTask, e_context: EventContext):
logger.debug(f"[MJ] start check task status, {task}")
max_retry_times = 90
while max_retry_times > 0:
time.sleep(10)
url = f"{self.base_url}/tasks/{task.id}"
try:
res = requests.get(url, headers=self.headers, timeout=8)
if res.status_code == 200:
res_json = res.json()
logger.debug(f"[MJ] task check res sync, task_id={task.id}, status={res.status_code}, "
f"data={res_json.get('data')}, thread={threading.current_thread().name}")
if res_json.get("data") and res_json.get("data").get("status") == Status.FINISHED.name:
# process success res
if self.tasks.get(task.id):
self.tasks[task.id].status = Status.FINISHED
self._process_success_task(task, res_json.get("data"), e_context)
return
max_retry_times -= 1
else:
res_json = res.json()
logger.warn(f"[MJ] image check error, status_code={res.status_code}, res={res_json}")
max_retry_times -= 20
except Exception as e:
max_retry_times -= 20
logger.warn(e)
logger.warn("[MJ] end from poll")
if self.tasks.get(task.id):
self.tasks[task.id].status = Status.EXPIRED
def _do_check_task(self, task: MJTask, e_context: EventContext):
threading.Thread(target=self.check_task_sync, args=(task, e_context)).start()
def _process_success_task(self, task: MJTask, res: dict, e_context: EventContext):
# channel send img
task.status = Status.FINISHED
task.img_id = res.get("img_id")
task.img_url = res.get("img_url")
logger.info(f"[MJ] task success, task_id={task.id}, img_id={task.img_id}, img_url={task.img_url}")
# send img
reply = Reply(ReplyType.IMAGE_URL, task.img_url)
channel = e_context["channel"]
_send(channel, reply, e_context["context"])
# send info
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
text = ""
if task.task_type == TaskType.GENERATE or task.task_type == TaskType.VARIATION or task.task_type == TaskType.RESET:
text = f"🎨绘画完成!\n"
if task.raw_prompt:
text += f"prompt: {task.raw_prompt}\n"
text += f"- - - - - - - - -\n图片ID: {task.img_id}"
text += f"\n\n🔎使用 {trigger_prefix}mju 命令放大图片\n"
text += f"例如:\n{trigger_prefix}mju {task.img_id} 1"
text += f"\n\n🪄使用 {trigger_prefix}mjv 命令变换图片\n"
text += f"例如:\n{trigger_prefix}mjv {task.img_id} 1"
text += f"\n\n🔄使用 {trigger_prefix}mjr 命令重新生成图片\n"
text += f"例如:\n{trigger_prefix}mjr {task.img_id}"
reply = Reply(ReplyType.INFO, text)
_send(channel, reply, e_context["context"])
self._print_tasks()
return
def _check_rate_limit(self, user_id: str, e_context: EventContext) -> bool:
tasks = self.find_tasks_by_user_id(user_id)
task_count = len([t for t in tasks if t.status == Status.PENDING])
if task_count >= self.config.get("max_tasks_per_user"):
reply = Reply(ReplyType.INFO, "您的Midjourney作图任务数已达上限,请稍后再试")
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
return False
task_count = len([t for t in self.tasks.values() if t.status == Status.PENDING])
if task_count >= self.config.get("max_tasks"):
reply = Reply(ReplyType.INFO, "Midjourney作图任务数已达上限,请稍后再试")
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
return False
return True
def _fetch_mode(self, prompt) -> str:
mode = self.config.get("mode")
if "--relax" in prompt or mode == TaskMode.RELAX.value:
return TaskMode.RELAX.value
return mode or TaskMode.FAST.value
def _run_loop(self, loop: asyncio.BaseEventLoop):
loop.run_forever()
loop.stop()
def _print_tasks(self):
for id in self.tasks:
logger.debug(f"[MJ] current task: {self.tasks[id]}")
def _set_reply_text(self, content: str, e_context: EventContext, level: ReplyType = ReplyType.ERROR):
reply = Reply(level, content)
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
def get_help_text(self, verbose=False, **kwargs):
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
help_text = "🎨利用Midjourney进行画图\n\n"
if not verbose:
return help_text
help_text += f" - 生成: {trigger_prefix}mj 描述词1, 描述词2.. \n - 放大: {trigger_prefix}mju 图片ID 图片序号\n - 变换: mjv 图片ID 图片序号\n - 重置: mjr 图片ID"
help_text += f"\n\n例如:\n\"{trigger_prefix}mj a little cat, white --ar 9:16\"\n\"{trigger_prefix}mju 11055927171882 2\""
help_text += f"\n\"{trigger_prefix}mjv 11055927171882 2\"\n\"{trigger_prefix}mjr 11055927171882\""
return help_text
def find_tasks_by_user_id(self, user_id) -> list:
result = []
with self.tasks_lock:
now = time.time()
for task in self.tasks.values():
if task.status == Status.PENDING and now > task.expiry_time:
task.status = Status.EXPIRED
logger.info(f"[MJ] {task} expired")
if task.user_id == user_id:
result.append(task)
return result
def _is_mj_open(self, context) -> bool:
# 获取远程应用插件状态
remote_enabled = False
if context.kwargs.get("isgroup"):
# 群聊场景只查询群对应的app_code
group_name = context.get("msg").from_user_nickname
app_code = self.fetch_group_app_code(group_name)
if app_code:
remote_enabled = Util.fetch_app_plugin(app_code, "Midjourney")
else:
# 非群聊场景使用全局app_code
app_code = conf().get("linkai_app_code")
if app_code:
remote_enabled = Util.fetch_app_plugin(app_code, "Midjourney")
# 本地配置
base_enabled = self.config.get("enabled")
return base_enabled or remote_enabled
def _send(channel, reply: Reply, context, retry_cnt=0):
try:
channel.send(reply, context)
except Exception as e:
logger.error("[WX] sendMsg error: {}".format(str(e)))
if isinstance(e, NotImplementedError):
return
logger.exception(e)
if retry_cnt < 2:
time.sleep(3 + 3 * retry_cnt)
channel.send(reply, context, retry_cnt + 1)
def check_prefix(content, prefix_list):
if not prefix_list:
return None
for prefix in prefix_list:
if content.startswith(prefix):
return prefix
return None | ---
+++
@@ -79,6 +79,11 @@
self.event_loop = asyncio.new_event_loop()
def judge_mj_task_type(self, e_context: EventContext):
+ """
+ 判断MJ任务的类型
+ :param e_context: 上下文
+ :return: 任务类型枚举
+ """
if not self.config:
return None
trigger_prefix = conf().get("plugin_trigger_prefix", "$")
@@ -99,6 +104,11 @@
return TaskType.GENERATE
def process_mj_task(self, mj_type: TaskType, e_context: EventContext):
+ """
+ 处理mj任务
+ :param mj_type: mj任务类型
+ :param e_context: 对话上下文
+ """
context = e_context['context']
session_id = context["session_id"]
cmd = context.content.split(maxsplit=1)
@@ -177,6 +187,13 @@
self._set_reply_text(f"暂不支持该命令", e_context)
def generate(self, prompt: str, user_id: str, e_context: EventContext) -> Reply:
+ """
+ 图片生成
+ :param prompt: 提示词
+ :param user_id: 用户id
+ :param e_context: 对话上下文
+ :return: 任务ID
+ """
logger.info(f"[MJ] image generate, prompt={prompt}")
mode = self._fetch_mode(prompt)
body = {"prompt": prompt, "mode": mode, "auto_translate": self.config.get("auto_translate")}
@@ -284,6 +301,12 @@
threading.Thread(target=self.check_task_sync, args=(task, e_context)).start()
def _process_success_task(self, task: MJTask, res: dict, e_context: EventContext):
+ """
+ 处理任务成功的结果
+ :param task: MJ任务
+ :param res: 请求结果
+ :param e_context: 对话上下文
+ """
# channel send img
task.status = Status.FINISHED
task.img_id = res.get("img_id")
@@ -316,6 +339,12 @@
return
def _check_rate_limit(self, user_id: str, e_context: EventContext) -> bool:
+ """
+ midjourney任务限流控制
+ :param user_id: 用户id
+ :param e_context: 对话上下文
+ :return: 任务是否能够生成, True:可以生成, False: 被限流
+ """
tasks = self.find_tasks_by_user_id(user_id)
task_count = len([t for t in tasks if t.status == Status.PENDING])
if task_count >= self.config.get("max_tasks_per_user"):
@@ -338,6 +367,10 @@
return mode or TaskMode.FAST.value
def _run_loop(self, loop: asyncio.BaseEventLoop):
+ """
+ 运行事件循环,用于轮询任务的线程
+ :param loop: 事件循环
+ """
loop.run_forever()
loop.stop()
@@ -346,6 +379,12 @@
logger.debug(f"[MJ] current task: {self.tasks[id]}")
def _set_reply_text(self, content: str, e_context: EventContext, level: ReplyType = ReplyType.ERROR):
+ """
+ 设置回复文本
+ :param content: 回复内容
+ :param e_context: 对话上下文
+ :param level: 回复等级
+ """
reply = Reply(level, content)
e_context["reply"] = reply
e_context.action = EventAction.BREAK_PASS
@@ -411,4 +450,4 @@
for prefix in prefix_list:
if content.startswith(prefix):
return prefix
- return None
+ return None
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/plugins/linkai/midjourney.py |
Add docstrings for internal functions | from models.session_manager import Session
from common.log import logger
from common import const
"""
e.g. [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Who won the world series in 2020?"},
{"role": "assistant", "content": "The Los Angeles Dodgers won the World Series in 2020."},
{"role": "user", "content": "Where was it played?"}
]
"""
class ChatGPTSession(Session):
def __init__(self, session_id, system_prompt=None, model="gpt-3.5-turbo"):
super().__init__(session_id, system_prompt)
self.model = model
self.reset()
def discard_exceeding(self, max_tokens, cur_tokens=None):
precise = True
try:
cur_tokens = self.calc_tokens()
except Exception as e:
precise = False
if cur_tokens is None:
raise e
logger.debug("Exception when counting tokens precisely for query: {}".format(e))
while cur_tokens > max_tokens:
if len(self.messages) > 2:
self.messages.pop(1)
elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
self.messages.pop(1)
if precise:
cur_tokens = self.calc_tokens()
else:
cur_tokens = cur_tokens - max_tokens
break
elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
break
else:
logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(max_tokens, cur_tokens, len(self.messages)))
break
if precise:
cur_tokens = self.calc_tokens()
else:
cur_tokens = cur_tokens - max_tokens
return cur_tokens
def calc_tokens(self):
return num_tokens_from_messages(self.messages, self.model)
# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_messages(messages, model):
if model in ["wenxin", "xunfei"] or model.startswith(const.GEMINI):
return num_tokens_by_character(messages)
import tiktoken
if model in ["gpt-3.5-turbo-0301", "gpt-35-turbo", "gpt-3.5-turbo-1106", "moonshot", const.LINKAI_35]:
return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
elif model in ["gpt-4-0314", "gpt-4-0613", "gpt-4-32k", "gpt-4-32k-0613", "gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-35-turbo-16k", "gpt-4-turbo-preview",
"gpt-4-1106-preview", const.GPT4_TURBO_PREVIEW, const.GPT4_VISION_PREVIEW, const.GPT4_TURBO_01_25,
const.GPT_4o, const.GPT_4O_0806, const.GPT_4o_MINI, const.LINKAI_4o, const.LINKAI_4_TURBO, const.GPT_5, const.GPT_5_MINI, const.GPT_5_NANO]:
return num_tokens_from_messages(messages, model="gpt-4")
elif model.startswith("claude-3"):
return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
logger.debug("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model == "gpt-3.5-turbo":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif model == "gpt-4":
tokens_per_message = 3
tokens_per_name = 1
else:
logger.debug(f"num_tokens_from_messages() is not implemented for model {model}. Returning num tokens assuming gpt-3.5-turbo.")
return num_tokens_from_messages(messages, model="gpt-3.5-turbo")
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
def num_tokens_by_character(messages):
tokens = 0
for msg in messages:
tokens += len(msg["content"])
return tokens | ---
+++
@@ -55,6 +55,7 @@
# refer to https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_messages(messages, model):
+ """Returns the number of tokens used by a list of messages."""
if model in ["wenxin", "xunfei"] or model.startswith(const.GEMINI):
return num_tokens_by_character(messages)
@@ -96,7 +97,8 @@
def num_tokens_by_character(messages):
+ """Returns the number of tokens used by a list of messages."""
tokens = 0
for msg in messages:
tokens += len(msg["content"])
- return tokens
+ return tokens
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/chatgpt/chat_gpt_session.py |
Add docstrings including usage examples | # encoding:utf-8
import time
import openai
from models.openai.openai_compat import RateLimitError, Timeout, APIConnectionError
from models.bot import Bot
from models.openai_compatible_bot import OpenAICompatibleBot
from models.openai.open_ai_image import OpenAIImage
from models.openai.open_ai_session import OpenAISession
from models.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf
user_session = dict()
# OpenAI对话模型API (可用)
class OpenAIBot(Bot, OpenAIImage, OpenAICompatibleBot):
def __init__(self):
super().__init__()
openai.api_key = conf().get("open_ai_api_key")
if conf().get("open_ai_api_base"):
openai.api_base = conf().get("open_ai_api_base")
proxy = conf().get("proxy")
if proxy:
openai.proxy = proxy
self.sessions = SessionManager(OpenAISession, model=conf().get("model") or "text-davinci-003")
self.args = {
"model": conf().get("model") or "text-davinci-003", # 对话模型的名称
"temperature": conf().get("temperature", 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
"max_tokens": 1200, # 回复最大的字符数
"top_p": 1,
"frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"presence_penalty": conf().get("presence_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"request_timeout": conf().get("request_timeout", None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get("request_timeout", None), # 重试超时时间,在这个时间内,将会自动重试
"stop": ["\n\n\n"],
}
def get_api_config(self):
return {
'api_key': conf().get("open_ai_api_key"),
'api_base': conf().get("open_ai_api_base"),
'model': conf().get("model", "text-davinci-003"),
'default_temperature': conf().get("temperature", 0.9),
'default_top_p': conf().get("top_p", 1.0),
'default_frequency_penalty': conf().get("frequency_penalty", 0.0),
'default_presence_penalty': conf().get("presence_penalty", 0.0),
}
def reply(self, query, context=None):
# acquire reply content
if context and context.type:
if context.type == ContextType.TEXT:
logger.info("[OPEN_AI] query={}".format(query))
session_id = context["session_id"]
reply = None
if query == "#清除记忆":
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, "记忆已清除")
elif query == "#清除所有":
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
else:
session = self.sessions.session_query(query, session_id)
result = self.reply_text(session)
total_tokens, completion_tokens, reply_content = (
result["total_tokens"],
result["completion_tokens"],
result["content"],
)
logger.debug(
"[OPEN_AI] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(str(session), session_id, reply_content, completion_tokens)
)
if total_tokens == 0:
reply = Reply(ReplyType.ERROR, reply_content)
else:
self.sessions.session_reply(reply_content, session_id, total_tokens)
reply = Reply(ReplyType.TEXT, reply_content)
return reply
elif context.type == ContextType.IMAGE_CREATE:
ok, retstring = self.create_img(query, 0)
reply = None
if ok:
reply = Reply(ReplyType.IMAGE_URL, retstring)
else:
reply = Reply(ReplyType.ERROR, retstring)
return reply
def reply_text(self, session: OpenAISession, retry_count=0):
try:
response = openai.Completion.create(prompt=str(session), **self.args)
res_content = response.choices[0]["text"].strip().replace("<|endoftext|>", "")
total_tokens = response["usage"]["total_tokens"]
completion_tokens = response["usage"]["completion_tokens"]
logger.info("[OPEN_AI] reply={}".format(res_content))
return {
"total_tokens": total_tokens,
"completion_tokens": completion_tokens,
"content": res_content,
}
except Exception as e:
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if isinstance(e, RateLimitError):
logger.warn("[OPEN_AI] RateLimitError: {}".format(e))
result["content"] = "提问太快啦,请休息一下再问我吧"
if need_retry:
time.sleep(20)
elif isinstance(e, Timeout):
logger.warn("[OPEN_AI] Timeout: {}".format(e))
result["content"] = "我没有收到你的消息"
if need_retry:
time.sleep(5)
elif isinstance(e, APIConnectionError):
logger.warn("[OPEN_AI] APIConnectionError: {}".format(e))
need_retry = False
result["content"] = "我连接不到你的网络"
else:
logger.warn("[OPEN_AI] Exception: {}".format(e))
need_retry = False
self.sessions.clear_session(session.session_id)
if need_retry:
logger.warn("[OPEN_AI] 第{}次重试".format(retry_count + 1))
return self.reply_text(session, retry_count + 1)
else:
return result
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
try:
# The old Completion API doesn't support tools
# We need to use ChatCompletion API instead
logger.info("[OPEN_AI] Using ChatCompletion API for tool support")
# Build request parameters for ChatCompletion
request_params = {
"model": kwargs.get("model", conf().get("model") or "gpt-4.1"),
"messages": messages,
"temperature": kwargs.get("temperature", conf().get("temperature", 0.9)),
"top_p": kwargs.get("top_p", 1),
"frequency_penalty": kwargs.get("frequency_penalty", conf().get("frequency_penalty", 0.0)),
"presence_penalty": kwargs.get("presence_penalty", conf().get("presence_penalty", 0.0)),
"stream": stream
}
# Add max_tokens if specified
if kwargs.get("max_tokens"):
request_params["max_tokens"] = kwargs["max_tokens"]
# Add tools if provided
if tools:
request_params["tools"] = tools
request_params["tool_choice"] = kwargs.get("tool_choice", "auto")
# Make API call using ChatCompletion
if stream:
return self._handle_stream_response(request_params)
else:
return self._handle_sync_response(request_params)
except Exception as e:
logger.error(f"[OPEN_AI] call_with_tools error: {e}")
if stream:
def error_generator():
yield {
"error": True,
"message": str(e),
"status_code": 500
}
return error_generator()
else:
return {
"error": True,
"message": str(e),
"status_code": 500
}
def _handle_sync_response(self, request_params):
try:
response = openai.ChatCompletion.create(**request_params)
logger.info(f"[OPEN_AI] call_with_tools reply, model={response.get('model')}, "
f"total_tokens={response.get('usage', {}).get('total_tokens', 0)}")
return response
except Exception as e:
logger.error(f"[OPEN_AI] sync response error: {e}")
raise
def _handle_stream_response(self, request_params):
try:
stream = openai.ChatCompletion.create(**request_params)
for chunk in stream:
yield chunk
except Exception as e:
logger.error(f"[OPEN_AI] stream response error: {e}")
yield {
"error": True,
"message": str(e),
"status_code": 500
} | ---
+++
@@ -43,6 +43,7 @@
}
def get_api_config(self):
+ """Get API configuration for OpenAI-compatible base class"""
return {
'api_key': conf().get("open_ai_api_key"),
'api_base': conf().get("open_ai_api_base"),
@@ -134,6 +135,22 @@
return result
def call_with_tools(self, messages, tools=None, stream=False, **kwargs):
+ """
+ Call OpenAI API with tool support for agent integration
+ Note: This bot uses the old Completion API which doesn't support tools.
+ For tool support, use ChatGPTBot instead.
+
+ This method converts to ChatCompletion API when tools are provided.
+
+ Args:
+ messages: List of messages
+ tools: List of tool definitions (OpenAI format)
+ stream: Whether to use streaming
+ **kwargs: Additional parameters
+
+ Returns:
+ Formatted response in OpenAI format or generator for streaming
+ """
try:
# The old Completion API doesn't support tools
# We need to use ChatCompletion API instead
@@ -183,6 +200,7 @@
}
def _handle_sync_response(self, request_params):
+ """Handle synchronous OpenAI ChatCompletion API response"""
try:
response = openai.ChatCompletion.create(**request_params)
@@ -196,6 +214,7 @@
raise
def _handle_stream_response(self, request_params):
+ """Handle streaming OpenAI ChatCompletion API response"""
try:
stream = openai.ChatCompletion.create(**request_params)
@@ -208,4 +227,4 @@
"error": True,
"message": str(e),
"status_code": 500
- }
+ }
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/openai/open_ai_bot.py |
Add professional docstrings to my codebase | # encoding:utf-8
import time
import json
import openai
from models.openai.openai_compat import error as openai_error, RateLimitError, Timeout, APIError, APIConnectionError
import requests
from common import const
from models.bot import Bot
from models.openai_compatible_bot import OpenAICompatibleBot
from models.chatgpt.chat_gpt_session import ChatGPTSession
from models.openai.open_ai_image import OpenAIImage
from models.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from common.token_bucket import TokenBucket
from config import conf, load_config
from models.baidu.baidu_wenxin_session import BaiduWenxinSession
# OpenAI对话模型API (可用)
class ChatGPTBot(Bot, OpenAIImage, OpenAICompatibleBot):
def __init__(self):
super().__init__()
# set the default api_key
openai.api_key = conf().get("open_ai_api_key")
if conf().get("open_ai_api_base"):
openai.api_base = conf().get("open_ai_api_base")
proxy = conf().get("proxy")
if proxy:
openai.proxy = proxy
if conf().get("rate_limit_chatgpt"):
self.tb4chatgpt = TokenBucket(conf().get("rate_limit_chatgpt", 20))
conf_model = conf().get("model") or "gpt-3.5-turbo"
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
# o1相关模型不支持system prompt,暂时用文心模型的session
self.args = {
"model": conf_model, # 对话模型的名称
"temperature": conf().get("temperature", 0.9), # 值在[0,1]之间,越大表示回复越具有不确定性
# "max_tokens":4096, # 回复最大的字符数
"top_p": conf().get("top_p", 1),
"frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"presence_penalty": conf().get("presence_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"request_timeout": conf().get("request_timeout", None), # 请求超时时间,openai接口默认设置为600,对于难问题一般需要较长时间
"timeout": conf().get("request_timeout", None), # 重试超时时间,在这个时间内,将会自动重试
}
# 部分模型暂不支持一些参数,特殊处理
if conf_model in [const.O1, const.O1_MINI, const.GPT_5, const.GPT_5_MINI, const.GPT_5_NANO]:
remove_keys = ["temperature", "top_p", "frequency_penalty", "presence_penalty"]
for key in remove_keys:
self.args.pop(key, None) # 如果键不存在,使用 None 来避免抛出错、
if conf_model in [const.O1, const.O1_MINI]: # o1系列模型不支持系统提示词,使用文心模型的session
self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
def get_api_config(self):
return {
'api_key': conf().get("open_ai_api_key"),
'api_base': conf().get("open_ai_api_base"),
'model': conf().get("model", "gpt-3.5-turbo"),
'default_temperature': conf().get("temperature", 0.9),
'default_top_p': conf().get("top_p", 1.0),
'default_frequency_penalty': conf().get("frequency_penalty", 0.0),
'default_presence_penalty': conf().get("presence_penalty", 0.0),
}
def reply(self, query, context=None):
# acquire reply content
if context.type == ContextType.TEXT:
logger.info("[CHATGPT] query={}".format(query))
session_id = context["session_id"]
reply = None
clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
if query in clear_memory_commands:
self.sessions.clear_session(session_id)
reply = Reply(ReplyType.INFO, "记忆已清除")
elif query == "#清除所有":
self.sessions.clear_all_session()
reply = Reply(ReplyType.INFO, "所有人记忆已清除")
elif query == "#更新配置":
load_config()
reply = Reply(ReplyType.INFO, "配置已更新")
if reply:
return reply
session = self.sessions.session_query(query, session_id)
logger.debug("[CHATGPT] session query={}".format(session.messages))
api_key = context.get("openai_api_key")
model = context.get("gpt_model")
new_args = None
if model:
new_args = self.args.copy()
new_args["model"] = model
# if context.get('stream'):
# # reply in stream
# return self.reply_text_stream(query, new_query, session_id)
reply_content = self.reply_text(session, api_key, args=new_args)
logger.debug(
"[CHATGPT] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
session.messages,
session_id,
reply_content["content"],
reply_content["completion_tokens"],
)
)
if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
reply = Reply(ReplyType.ERROR, reply_content["content"])
elif reply_content["completion_tokens"] > 0:
self.sessions.session_reply(reply_content["content"], session_id, reply_content["total_tokens"])
reply = Reply(ReplyType.TEXT, reply_content["content"])
else:
reply = Reply(ReplyType.ERROR, reply_content["content"])
logger.debug("[CHATGPT] reply {} used 0 tokens.".format(reply_content))
return reply
elif context.type == ContextType.IMAGE_CREATE:
ok, retstring = self.create_img(query, 0)
reply = None
if ok:
reply = Reply(ReplyType.IMAGE_URL, retstring)
else:
reply = Reply(ReplyType.ERROR, retstring)
return reply
elif context.type == ContextType.IMAGE:
logger.info("[CHATGPT] Image message received")
reply = self.reply_image(context)
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply
def reply_image(self, context):
import base64
import os
try:
image_path = context.content
logger.info(f"[CHATGPT] Processing image: {image_path}")
# Check if file exists
if not os.path.exists(image_path):
logger.error(f"[CHATGPT] Image file not found: {image_path}")
return Reply(ReplyType.ERROR, "图片文件不存在")
# Read and encode image
with open(image_path, "rb") as f:
image_data = f.read()
image_base64 = base64.b64encode(image_data).decode("utf-8")
# Detect image format
extension = os.path.splitext(image_path)[1].lower()
mime_type_map = {
".jpg": "image/jpeg",
".jpeg": "image/jpeg",
".png": "image/png",
".gif": "image/gif",
".webp": "image/webp"
}
mime_type = mime_type_map.get(extension, "image/jpeg")
# Get model and API config
model = context.get("gpt_model") or conf().get("model", "gpt-4o")
api_key = context.get("openai_api_key") or conf().get("open_ai_api_key")
api_base = conf().get("open_ai_api_base")
# Build vision request
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "请描述这张图片的内容"},
{
"type": "image_url",
"image_url": {
"url": f"data:{mime_type};base64,{image_base64}"
}
}
]
}
]
logger.info(f"[CHATGPT] Calling vision API with model: {model}")
# Call OpenAI API
kwargs = {
"model": model,
"messages": messages,
"max_tokens": 1000
}
if api_key:
kwargs["api_key"] = api_key
if api_base:
kwargs["api_base"] = api_base
response = openai.ChatCompletion.create(**kwargs)
content = response.choices[0]["message"]["content"]
logger.info(f"[CHATGPT] Vision API response: {content[:100]}...")
# Clean up temp file
try:
os.remove(image_path)
logger.debug(f"[CHATGPT] Removed temp image file: {image_path}")
except Exception:
pass
return Reply(ReplyType.TEXT, content)
except Exception as e:
logger.error(f"[CHATGPT] Image processing error: {e}")
import traceback
logger.error(traceback.format_exc())
return Reply(ReplyType.ERROR, f"图片识别失败: {str(e)}")
def reply_text(self, session: ChatGPTSession, api_key=None, args=None, retry_count=0) -> dict:
try:
if conf().get("rate_limit_chatgpt") and not self.tb4chatgpt.get_token():
raise RateLimitError("RateLimitError: rate limit exceeded")
# if api_key == None, the default openai.api_key will be used
if args is None:
args = self.args
response = openai.ChatCompletion.create(api_key=api_key, messages=session.messages, **args)
# logger.debug("[CHATGPT] response={}".format(response))
logger.info("[ChatGPT] reply={}, total_tokens={}".format(response.choices[0]['message']['content'], response["usage"]["total_tokens"]))
return {
"total_tokens": response["usage"]["total_tokens"],
"completion_tokens": response["usage"]["completion_tokens"],
"content": response.choices[0]["message"]["content"],
}
except Exception as e:
need_retry = retry_count < 2
result = {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
if isinstance(e, RateLimitError):
logger.warn("[CHATGPT] RateLimitError: {}".format(e))
result["content"] = "提问太快啦,请休息一下再问我吧"
if need_retry:
time.sleep(20)
elif isinstance(e, Timeout):
logger.warn("[CHATGPT] Timeout: {}".format(e))
result["content"] = "我没有收到你的消息"
if need_retry:
time.sleep(5)
elif isinstance(e, APIError):
logger.warn("[CHATGPT] Bad Gateway: {}".format(e))
result["content"] = "请再问我一次"
if need_retry:
time.sleep(10)
elif isinstance(e, APIConnectionError):
logger.warn("[CHATGPT] APIConnectionError: {}".format(e))
result["content"] = "我连接不到你的网络"
if need_retry:
time.sleep(5)
else:
logger.exception("[CHATGPT] Exception: {}".format(e))
need_retry = False
self.sessions.clear_session(session.session_id)
if need_retry:
logger.warn("[CHATGPT] 第{}次重试".format(retry_count + 1))
return self.reply_text(session, api_key, args, retry_count + 1)
else:
return result
class AzureChatGPTBot(ChatGPTBot):
def __init__(self):
super().__init__()
openai.api_type = "azure"
openai.api_version = conf().get("azure_api_version", "2023-06-01-preview")
self.args["deployment_id"] = conf().get("azure_deployment_id")
def create_img(self, query, retry_count=0, api_key=None):
text_to_image_model = conf().get("text_to_image")
if text_to_image_model == "dall-e-2":
api_version = "2023-06-01-preview"
endpoint = conf().get("azure_openai_dalle_api_base","open_ai_api_base")
# 检查endpoint是否以/结尾
if not endpoint.endswith("/"):
endpoint = endpoint + "/"
url = "{}openai/images/generations:submit?api-version={}".format(endpoint, api_version)
api_key = conf().get("azure_openai_dalle_api_key","open_ai_api_key")
headers = {"api-key": api_key, "Content-Type": "application/json"}
try:
body = {"prompt": query, "size": conf().get("image_create_size", "256x256"),"n": 1}
submission = requests.post(url, headers=headers, json=body)
operation_location = submission.headers['operation-location']
status = ""
while (status != "succeeded"):
if retry_count > 3:
return False, "图片生成失败"
response = requests.get(operation_location, headers=headers)
status = response.json()['status']
retry_count += 1
image_url = response.json()['result']['data'][0]['url']
return True, image_url
except Exception as e:
logger.error("create image error: {}".format(e))
return False, "图片生成失败"
elif text_to_image_model == "dall-e-3":
api_version = conf().get("azure_api_version", "2024-02-15-preview")
endpoint = conf().get("azure_openai_dalle_api_base","open_ai_api_base")
# 检查endpoint是否以/结尾
if not endpoint.endswith("/"):
endpoint = endpoint + "/"
url = "{}openai/deployments/{}/images/generations?api-version={}".format(endpoint, conf().get("azure_openai_dalle_deployment_id","text_to_image"),api_version)
api_key = conf().get("azure_openai_dalle_api_key","open_ai_api_key")
headers = {"api-key": api_key, "Content-Type": "application/json"}
try:
body = {"prompt": query, "size": conf().get("image_create_size", "1024x1024"), "quality": conf().get("dalle3_image_quality", "standard")}
response = requests.post(url, headers=headers, json=body)
response.raise_for_status() # 检查请求是否成功
data = response.json()
# 检查响应中是否包含图像 URL
if 'data' in data and len(data['data']) > 0 and 'url' in data['data'][0]:
image_url = data['data'][0]['url']
return True, image_url
else:
error_message = "响应中没有图像 URL"
logger.error(error_message)
return False, "图片生成失败"
except requests.exceptions.RequestException as e:
# 捕获所有请求相关的异常
try:
error_detail = response.json().get('error', {}).get('message', str(e))
except ValueError:
error_detail = str(e)
error_message = f"{error_detail}"
logger.error(error_message)
return False, error_message
except Exception as e:
# 捕获所有其他异常
error_message = f"生成图像时发生错误: {e}"
logger.error(error_message)
return False, "图片生成失败"
else:
return False, "图片生成失败,未配置text_to_image参数" | --- +++ @@ -55,6 +55,7 @@ self.sessions = SessionManager(BaiduWenxinSession, model=conf().get("model") or const.O1_MINI)
def get_api_config(self):
+ """Get API configuration for OpenAI-compatible base class"""
return {
'api_key': conf().get("open_ai_api_key"),
'api_base': conf().get("open_ai_api_base"),
@@ -133,6 +134,9 @@
return reply
def reply_image(self, context):
+ """
+ Process image message using OpenAI Vision API
+ """
import base64
import os
@@ -216,6 +220,13 @@
return Reply(ReplyType.ERROR, f"图片识别失败: {str(e)}")
def reply_text(self, session: ChatGPTSession, api_key=None, args=None, retry_count=0) -> dict:
+ """
+ call openai's ChatCompletion to get the answer
+ :param session: a conversation session
+ :param session_id: session id
+ :param retry_count: retry count
+ :return: {}
+ """
try:
if conf().get("rate_limit_chatgpt") and not self.tb4chatgpt.get_token():
raise RateLimitError("RateLimitError: rate limit exceeded")
@@ -338,4 +349,4 @@
logger.error(error_message)
return False, "图片生成失败"
else:
- return False, "图片生成失败,未配置text_to_image参数"
+ return False, "图片生成失败,未配置text_to_image参数"
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/models/chatgpt/chat_gpt_bot.py |
Write docstrings describing functionality |
class Voice(object):
def voiceToText(self, voice_file):
raise NotImplementedError
def textToVoice(self, text):
raise NotImplementedError | ---
+++
@@ -1,8 +1,17 @@
+"""
+Voice service abstract class
+"""
class Voice(object):
def voiceToText(self, voice_file):
+ """
+ Send voice to voice service and get text
+ """
raise NotImplementedError
def textToVoice(self, text):
- raise NotImplementedError
+ """
+ Send text to voice service and get voice
+ """
+ raise NotImplementedError
| https://raw.githubusercontent.com/zhayujie/chatgpt-on-wechat/HEAD/voice/voice.py |
Create documentation strings for testing functions |
import os
os.environ["PYTORCH_ALLOC_CONF"] = "expandable_segments:True"
os.environ["HF_HUB_DISABLE_PROGRESS_BARS"] = "1"
import gc
import math
import time
from dataclasses import dataclass, asdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from kernels import get_kernel
cap = torch.cuda.get_device_capability()
# varunneal's FA3 is Hopper only, use kernels-community on non-Hopper GPUs
repo = "varunneal/flash-attention-3" if cap == (9, 0) else "kernels-community/flash-attn3"
fa3 = get_kernel(repo).flash_attn_interface
from prepare import MAX_SEQ_LEN, TIME_BUDGET, Tokenizer, make_dataloader, evaluate_bpb
# ---------------------------------------------------------------------------
# GPT Model
# ---------------------------------------------------------------------------
@dataclass
class GPTConfig:
sequence_len: int = 2048
vocab_size: int = 32768
n_layer: int = 12
n_head: int = 6
n_kv_head: int = 6
n_embd: int = 768
window_pattern: str = "SSSL"
def norm(x):
return F.rms_norm(x, (x.size(-1),))
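# Descriptive note (added comment): has_ve marks the layers whose index parity matches the final layer,
# so value embeddings land on every other layer counting back from the last one.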
def has_ve(layer_idx, n_layer):
return layer_idx % 2 == (n_layer - 1) % 2
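# Descriptive note (added comment): rotary position embedding that rotates the two halves of each
# head's channel dimension by the precomputed (cos, sin) angles.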
def apply_rotary_emb(x, cos, sin):
assert x.ndim == 4
d = x.shape[3] // 2
x1, x2 = x[..., :d], x[..., d:]
y1 = x1 * cos + x2 * sin
y2 = x1 * (-sin) + x2 * cos
return torch.cat([y1, y2], 3)
class CausalSelfAttention(nn.Module):
def __init__(self, config, layer_idx):
super().__init__()
self.n_head = config.n_head
self.n_kv_head = config.n_kv_head
self.n_embd = config.n_embd
self.head_dim = self.n_embd // self.n_head
assert self.n_embd % self.n_head == 0
assert self.n_kv_head <= self.n_head and self.n_head % self.n_kv_head == 0
self.c_q = nn.Linear(self.n_embd, self.n_head * self.head_dim, bias=False)
self.c_k = nn.Linear(self.n_embd, self.n_kv_head * self.head_dim, bias=False)
self.c_v = nn.Linear(self.n_embd, self.n_kv_head * self.head_dim, bias=False)
self.c_proj = nn.Linear(self.n_embd, self.n_embd, bias=False)
self.ve_gate_channels = 32
self.ve_gate = nn.Linear(self.ve_gate_channels, self.n_kv_head, bias=False) if has_ve(layer_idx, config.n_layer) else None
def forward(self, x, ve, cos_sin, window_size):
B, T, C = x.size()
q = self.c_q(x).view(B, T, self.n_head, self.head_dim)
k = self.c_k(x).view(B, T, self.n_kv_head, self.head_dim)
v = self.c_v(x).view(B, T, self.n_kv_head, self.head_dim)
# Value residual (ResFormer): mix in value embedding with input-dependent gate per head
if ve is not None:
ve = ve.view(B, T, self.n_kv_head, self.head_dim)
gate = 2 * torch.sigmoid(self.ve_gate(x[..., :self.ve_gate_channels]))
v = v + gate.unsqueeze(-1) * ve
cos, sin = cos_sin
q, k = apply_rotary_emb(q, cos, sin), apply_rotary_emb(k, cos, sin)
q, k = norm(q), norm(k)
y = fa3.flash_attn_func(q, k, v, causal=True, window_size=window_size)
y = y.contiguous().view(B, T, -1)
y = self.c_proj(y)
return y
class MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd, bias=False)
self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd, bias=False)
def forward(self, x):
x = self.c_fc(x)
x = F.relu(x).square()
x = self.c_proj(x)
return x
class Block(nn.Module):
def __init__(self, config, layer_idx):
super().__init__()
self.attn = CausalSelfAttention(config, layer_idx)
self.mlp = MLP(config)
def forward(self, x, ve, cos_sin, window_size):
x = x + self.attn(norm(x), ve, cos_sin, window_size)
x = x + self.mlp(norm(x))
return x
class GPT(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.window_sizes = self._compute_window_sizes(config)
self.transformer = nn.ModuleDict({
"wte": nn.Embedding(config.vocab_size, config.n_embd),
"h": nn.ModuleList([Block(config, i) for i in range(config.n_layer)]),
})
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.resid_lambdas = nn.Parameter(torch.ones(config.n_layer))
self.x0_lambdas = nn.Parameter(torch.zeros(config.n_layer))
# Value embeddings
head_dim = config.n_embd // config.n_head
kv_dim = config.n_kv_head * head_dim
self.value_embeds = nn.ModuleDict({
str(i): nn.Embedding(config.vocab_size, kv_dim)
for i in range(config.n_layer) if has_ve(i, config.n_layer)
})
# Rotary embeddings
self.rotary_seq_len = config.sequence_len * 10
cos, sin = self._precompute_rotary_embeddings(self.rotary_seq_len, head_dim)
self.register_buffer("cos", cos, persistent=False)
self.register_buffer("sin", sin, persistent=False)
@torch.no_grad()
def init_weights(self):
# Embedding and unembedding
torch.nn.init.normal_(self.transformer.wte.weight, mean=0.0, std=1.0)
torch.nn.init.normal_(self.lm_head.weight, mean=0.0, std=0.001)
# Transformer blocks
n_embd = self.config.n_embd
s = 3**0.5 * n_embd**-0.5
for block in self.transformer.h:
torch.nn.init.uniform_(block.attn.c_q.weight, -s, s)
torch.nn.init.uniform_(block.attn.c_k.weight, -s, s)
torch.nn.init.uniform_(block.attn.c_v.weight, -s, s)
torch.nn.init.zeros_(block.attn.c_proj.weight)
torch.nn.init.uniform_(block.mlp.c_fc.weight, -s, s)
torch.nn.init.zeros_(block.mlp.c_proj.weight)
# Per-layer scalars
self.resid_lambdas.fill_(1.0)
self.x0_lambdas.fill_(0.1)
# Value embeddings
for ve in self.value_embeds.values():
torch.nn.init.uniform_(ve.weight, -s, s)
# Gate weights init to zero (sigmoid(0)=0.5, scaled by 2 -> 1.0 = neutral)
for block in self.transformer.h:
if block.attn.ve_gate is not None:
torch.nn.init.zeros_(block.attn.ve_gate.weight)
# Rotary embeddings
head_dim = self.config.n_embd // self.config.n_head
cos, sin = self._precompute_rotary_embeddings(self.rotary_seq_len, head_dim)
self.cos, self.sin = cos, sin
# Cast embeddings to bf16
self.transformer.wte.to(dtype=torch.bfloat16)
for ve in self.value_embeds.values():
ve.to(dtype=torch.bfloat16)
def _precompute_rotary_embeddings(self, seq_len, head_dim, base=10000, device=None):
if device is None:
device = self.transformer.wte.weight.device
channel_range = torch.arange(0, head_dim, 2, dtype=torch.float32, device=device)
inv_freq = 1.0 / (base ** (channel_range / head_dim))
t = torch.arange(seq_len, dtype=torch.float32, device=device)
freqs = torch.outer(t, inv_freq)
cos, sin = freqs.cos(), freqs.sin()
cos, sin = cos.bfloat16(), sin.bfloat16()
cos, sin = cos[None, :, None, :], sin[None, :, None, :]
return cos, sin
def _compute_window_sizes(self, config):
pattern = config.window_pattern.upper()
assert all(c in "SL" for c in pattern)
long_window = config.sequence_len
short_window = long_window // 2
char_to_window = {"L": (long_window, 0), "S": (short_window, 0)}
window_sizes = []
for layer_idx in range(config.n_layer):
char = pattern[layer_idx % len(pattern)]
window_sizes.append(char_to_window[char])
window_sizes[-1] = (long_window, 0)
return window_sizes
def estimate_flops(self):
nparams = sum(p.numel() for p in self.parameters())
value_embeds_numel = sum(ve.weight.numel() for ve in self.value_embeds.values())
nparams_exclude = (self.transformer.wte.weight.numel() + value_embeds_numel +
self.resid_lambdas.numel() + self.x0_lambdas.numel())
h = self.config.n_head
q = self.config.n_embd // self.config.n_head
t = self.config.sequence_len
attn_flops = 0
for window_size in self.window_sizes:
window = window_size[0]
effective_seq = t if window < 0 else min(window, t)
attn_flops += 12 * h * q * effective_seq
return 6 * (nparams - nparams_exclude) + attn_flops
def num_scaling_params(self):
wte = sum(p.numel() for p in self.transformer.wte.parameters())
value_embeds = sum(p.numel() for p in self.value_embeds.parameters())
lm_head = sum(p.numel() for p in self.lm_head.parameters())
transformer_matrices = sum(p.numel() for p in self.transformer.h.parameters())
scalars = self.resid_lambdas.numel() + self.x0_lambdas.numel()
total = wte + value_embeds + lm_head + transformer_matrices + scalars
return {
'wte': wte, 'value_embeds': value_embeds, 'lm_head': lm_head,
'transformer_matrices': transformer_matrices, 'scalars': scalars, 'total': total,
}
def setup_optimizer(self, unembedding_lr=0.004, embedding_lr=0.2, matrix_lr=0.02,
weight_decay=0.0, adam_betas=(0.8, 0.95), scalar_lr=0.5):
model_dim = self.config.n_embd
matrix_params = list(self.transformer.h.parameters())
value_embeds_params = list(self.value_embeds.parameters())
embedding_params = list(self.transformer.wte.parameters())
lm_head_params = list(self.lm_head.parameters())
resid_params = [self.resid_lambdas]
x0_params = [self.x0_lambdas]
assert len(list(self.parameters())) == (len(matrix_params) + len(embedding_params) +
len(lm_head_params) + len(value_embeds_params) + len(resid_params) + len(x0_params))
# Scale LR ∝ 1/√dmodel (tuned at 768 dim)
dmodel_lr_scale = (model_dim / 768) ** -0.5
print(f"Scaling AdamW LRs by 1/sqrt({model_dim}/768) = {dmodel_lr_scale:.6f}")
param_groups = [
dict(kind='adamw', params=lm_head_params, lr=unembedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0),
dict(kind='adamw', params=embedding_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0),
dict(kind='adamw', params=value_embeds_params, lr=embedding_lr * dmodel_lr_scale, betas=adam_betas, eps=1e-10, weight_decay=0.0),
dict(kind='adamw', params=resid_params, lr=scalar_lr * 0.01, betas=adam_betas, eps=1e-10, weight_decay=0.0),
dict(kind='adamw', params=x0_params, lr=scalar_lr, betas=(0.96, 0.95), eps=1e-10, weight_decay=0.0),
]
for shape in sorted({p.shape for p in matrix_params}):
group_params = [p for p in matrix_params if p.shape == shape]
param_groups.append(dict(
kind='muon', params=group_params, lr=matrix_lr,
momentum=0.95, ns_steps=5, beta2=0.95, weight_decay=weight_decay,
))
optimizer = MuonAdamW(param_groups)
for group in optimizer.param_groups:
group["initial_lr"] = group["lr"]
return optimizer
def forward(self, idx, targets=None, reduction='mean'):
B, T = idx.size()
assert T <= self.cos.size(1)
cos_sin = self.cos[:, :T], self.sin[:, :T]
x = self.transformer.wte(idx)
x = norm(x)
x0 = x
for i, block in enumerate(self.transformer.h):
x = self.resid_lambdas[i] * x + self.x0_lambdas[i] * x0
ve = self.value_embeds[str(i)](idx) if str(i) in self.value_embeds else None
x = block(x, ve, cos_sin, self.window_sizes[i])
x = norm(x)
softcap = 15
logits = self.lm_head(x)
logits = logits.float()
logits = softcap * torch.tanh(logits / softcap)
if targets is not None:
loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1),
ignore_index=-1, reduction=reduction)
return loss
return logits
# ---------------------------------------------------------------------------
# Optimizer (MuonAdamW, single GPU only)
# ---------------------------------------------------------------------------
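# Descriptive note (added comment): polynomial coefficients for the Polar Express iteration used in
# muon_step_fused below to approximately orthogonalize the momentum-averaged gradient matrices.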
polar_express_coeffs = [
(8.156554524902461, -22.48329292557795, 15.878769915207462),
(4.042929935166739, -2.808917465908714, 0.5000178451051316),
(3.8916678022926607, -2.772484153217685, 0.5060648178503393),
(3.285753657755655, -2.3681294933425376, 0.46449024233003106),
(2.3465413258596377, -1.7097828382687081, 0.42323551169305323),
]
@torch.compile(dynamic=False, fullgraph=True)
def adamw_step_fused(p, grad, exp_avg, exp_avg_sq, step_t, lr_t, beta1_t, beta2_t, eps_t, wd_t):
p.mul_(1 - lr_t * wd_t)
exp_avg.lerp_(grad, 1 - beta1_t)
exp_avg_sq.lerp_(grad.square(), 1 - beta2_t)
bias1 = 1 - beta1_t ** step_t
bias2 = 1 - beta2_t ** step_t
denom = (exp_avg_sq / bias2).sqrt() + eps_t
step_size = lr_t / bias1
p.add_(exp_avg / denom, alpha=-step_size)
@torch.compile(dynamic=False, fullgraph=True)
def muon_step_fused(stacked_grads, stacked_params, momentum_buffer, second_momentum_buffer,
momentum_t, lr_t, wd_t, beta2_t, ns_steps, red_dim):
# Nesterov momentum
momentum = momentum_t.to(stacked_grads.dtype)
momentum_buffer.lerp_(stacked_grads, 1 - momentum)
g = stacked_grads.lerp_(momentum_buffer, momentum)
# Polar express orthogonalization
X = g.bfloat16()
X = X / (X.norm(dim=(-2, -1), keepdim=True) * 1.02 + 1e-6)
if g.size(-2) > g.size(-1):
for a, b, c in polar_express_coeffs[:ns_steps]:
A = X.mT @ X
B = b * A + c * (A @ A)
X = a * X + X @ B
else:
for a, b, c in polar_express_coeffs[:ns_steps]:
A = X @ X.mT
B = b * A + c * (A @ A)
X = a * X + B @ X
g = X
# NorMuon variance reduction
beta2 = beta2_t.to(g.dtype)
v_mean = g.float().square().mean(dim=red_dim, keepdim=True)
red_dim_size = g.size(red_dim)
v_norm_sq = v_mean.sum(dim=(-2, -1), keepdim=True) * red_dim_size
v_norm = v_norm_sq.sqrt()
second_momentum_buffer.lerp_(v_mean.to(dtype=second_momentum_buffer.dtype), 1 - beta2)
step_size = second_momentum_buffer.clamp_min(1e-10).rsqrt()
scaled_sq_sum = (v_mean * red_dim_size) * step_size.float().square()
v_norm_new = scaled_sq_sum.sum(dim=(-2, -1), keepdim=True).sqrt()
final_scale = step_size * (v_norm / v_norm_new.clamp_min(1e-10))
g = g * final_scale.to(g.dtype)
# Cautious weight decay + parameter update
lr = lr_t.to(g.dtype)
wd = wd_t.to(g.dtype)
mask = (g * stacked_params) >= 0
stacked_params.sub_(lr * g + lr * wd * stacked_params * mask)
class MuonAdamW(torch.optim.Optimizer):
def __init__(self, param_groups):
super().__init__(param_groups, defaults={})
# 0-D CPU tensors to avoid torch.compile recompilation when values change
self._adamw_step_t = torch.tensor(0.0, dtype=torch.float32, device="cpu")
self._adamw_lr_t = torch.tensor(0.0, dtype=torch.float32, device="cpu")
self._adamw_beta1_t = torch.tensor(0.0, dtype=torch.float32, device="cpu")
self._adamw_beta2_t = torch.tensor(0.0, dtype=torch.float32, device="cpu")
self._adamw_eps_t = torch.tensor(0.0, dtype=torch.float32, device="cpu")
self._adamw_wd_t = torch.tensor(0.0, dtype=torch.float32, device="cpu")
self._muon_momentum_t = torch.tensor(0.0, dtype=torch.float32, device="cpu")
self._muon_lr_t = torch.tensor(0.0, dtype=torch.float32, device="cpu")
self._muon_wd_t = torch.tensor(0.0, dtype=torch.float32, device="cpu")
self._muon_beta2_t = torch.tensor(0.0, dtype=torch.float32, device="cpu")
def _step_adamw(self, group):
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
if not state:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
state['step'] += 1
self._adamw_step_t.fill_(state['step'])
self._adamw_lr_t.fill_(group['lr'])
self._adamw_beta1_t.fill_(group['betas'][0])
self._adamw_beta2_t.fill_(group['betas'][1])
self._adamw_eps_t.fill_(group['eps'])
self._adamw_wd_t.fill_(group['weight_decay'])
adamw_step_fused(p, grad, state['exp_avg'], state['exp_avg_sq'],
self._adamw_step_t, self._adamw_lr_t, self._adamw_beta1_t,
self._adamw_beta2_t, self._adamw_eps_t, self._adamw_wd_t)
def _step_muon(self, group):
params = group['params']
if not params:
return
p = params[0]
state = self.state[p]
num_params = len(params)
shape, device, dtype = p.shape, p.device, p.dtype
if "momentum_buffer" not in state:
state["momentum_buffer"] = torch.zeros(num_params, *shape, dtype=dtype, device=device)
if "second_momentum_buffer" not in state:
state_shape = (num_params, shape[-2], 1) if shape[-2] >= shape[-1] else (num_params, 1, shape[-1])
state["second_momentum_buffer"] = torch.zeros(state_shape, dtype=dtype, device=device)
red_dim = -1 if shape[-2] >= shape[-1] else -2
stacked_grads = torch.stack([p.grad for p in params])
stacked_params = torch.stack(params)
self._muon_momentum_t.fill_(group["momentum"])
self._muon_beta2_t.fill_(group["beta2"] if group["beta2"] is not None else 0.0)
self._muon_lr_t.fill_(group["lr"] * max(1.0, shape[-2] / shape[-1])**0.5)
self._muon_wd_t.fill_(group["weight_decay"])
muon_step_fused(stacked_grads, stacked_params,
state["momentum_buffer"], state["second_momentum_buffer"],
self._muon_momentum_t, self._muon_lr_t, self._muon_wd_t,
self._muon_beta2_t, group["ns_steps"], red_dim)
torch._foreach_copy_(params, list(stacked_params.unbind(0)))
@torch.no_grad()
def step(self):
for group in self.param_groups:
if group['kind'] == 'adamw':
self._step_adamw(group)
elif group['kind'] == 'muon':
self._step_muon(group)
# ---------------------------------------------------------------------------
# Hyperparameters (edit these directly, no CLI flags needed)
# ---------------------------------------------------------------------------
# Model architecture
ASPECT_RATIO = 64 # model_dim = depth * ASPECT_RATIO
HEAD_DIM = 128 # target head dimension for attention
WINDOW_PATTERN = "SSSL" # sliding window pattern: L=full, S=half context
# Optimization
TOTAL_BATCH_SIZE = 2**19 # ~524K tokens per optimizer step
EMBEDDING_LR = 0.6 # learning rate for token embeddings (Adam)
UNEMBEDDING_LR = 0.004 # learning rate for lm_head (Adam)
MATRIX_LR = 0.04 # learning rate for matrix parameters (Muon)
SCALAR_LR = 0.5 # learning rate for per-layer scalars (Adam)
WEIGHT_DECAY = 0.2 # cautious weight decay for Muon
ADAM_BETAS = (0.8, 0.95) # Adam beta1, beta2
WARMUP_RATIO = 0.0 # fraction of time budget for LR warmup
WARMDOWN_RATIO = 0.5 # fraction of time budget for LR warmdown
FINAL_LR_FRAC = 0.0 # final LR as fraction of initial
# Model size
DEPTH = 8 # number of transformer layers
DEVICE_BATCH_SIZE = 128 # per-device batch size (reduce if OOM)
# ---------------------------------------------------------------------------
# Setup: tokenizer, model, optimizer, dataloader
# ---------------------------------------------------------------------------
t_start = time.time()
torch.manual_seed(42)
torch.cuda.manual_seed(42)
torch.set_float32_matmul_precision("high")
device = torch.device("cuda")
autocast_ctx = torch.amp.autocast(device_type="cuda", dtype=torch.bfloat16)
H100_BF16_PEAK_FLOPS = 989.5e12
tokenizer = Tokenizer.from_directory()
vocab_size = tokenizer.get_vocab_size()
print(f"Vocab size: {vocab_size:,}")
def build_model_config(depth):
base_dim = depth * ASPECT_RATIO
model_dim = ((base_dim + HEAD_DIM - 1) // HEAD_DIM) * HEAD_DIM
num_heads = model_dim // HEAD_DIM
return GPTConfig(
sequence_len=MAX_SEQ_LEN, vocab_size=vocab_size,
n_layer=depth, n_head=num_heads, n_kv_head=num_heads, n_embd=model_dim,
window_pattern=WINDOW_PATTERN,
)
config = build_model_config(DEPTH)
print(f"Model config: {asdict(config)}")
with torch.device("meta"):
model = GPT(config)
model.to_empty(device=device)
model.init_weights()
param_counts = model.num_scaling_params()
print("Parameter counts:")
for key, value in param_counts.items():
print(f" {key:24s}: {value:,}")
num_params = param_counts['total']
num_flops_per_token = model.estimate_flops()
print(f"Estimated FLOPs per token: {num_flops_per_token:e}")
tokens_per_fwdbwd = DEVICE_BATCH_SIZE * MAX_SEQ_LEN
assert TOTAL_BATCH_SIZE % tokens_per_fwdbwd == 0
grad_accum_steps = TOTAL_BATCH_SIZE // tokens_per_fwdbwd
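# Editor's note (hedged arithmetic): with the defaults above, tokens_per_fwdbwd =
# 128 * 2048 = 262,144 tokens, so grad_accum_steps = 524,288 // 262,144 = 2
# forward/backward passes per optimizer step.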
optimizer = model.setup_optimizer(
unembedding_lr=UNEMBEDDING_LR,
embedding_lr=EMBEDDING_LR,
scalar_lr=SCALAR_LR,
adam_betas=ADAM_BETAS,
matrix_lr=MATRIX_LR,
weight_decay=WEIGHT_DECAY,
)
model = torch.compile(model, dynamic=False)
train_loader = make_dataloader(tokenizer, DEVICE_BATCH_SIZE, MAX_SEQ_LEN, "train")
x, y, epoch = next(train_loader) # prefetch first batch
print(f"Time budget: {TIME_BUDGET}s")
print(f"Gradient accumulation steps: {grad_accum_steps}")
# Schedules (all based on progress = training_time / TIME_BUDGET)
def get_lr_multiplier(progress):
if progress < WARMUP_RATIO:
return progress / WARMUP_RATIO if WARMUP_RATIO > 0 else 1.0
elif progress < 1.0 - WARMDOWN_RATIO:
return 1.0
else:
cooldown = (1.0 - progress) / WARMDOWN_RATIO
return cooldown * 1.0 + (1 - cooldown) * FINAL_LR_FRAC
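# Editor's note (hedged worked example): with the defaults above (WARMUP_RATIO=0.0,
# WARMDOWN_RATIO=0.5, FINAL_LR_FRAC=0.0) the multiplier stays at 1.0 for the first half
# of the time budget, then decays linearly to 0, e.g.
#   get_lr_multiplier(0.25) == 1.0
#   get_lr_multiplier(0.75) == 0.5
#   get_lr_multiplier(1.00) == 0.0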
def get_muon_momentum(step):
frac = min(step / 300, 1)
return (1 - frac) * 0.85 + frac * 0.95
def get_weight_decay(progress):
return WEIGHT_DECAY * (1 - progress)
# ---------------------------------------------------------------------------
# Training loop
# ---------------------------------------------------------------------------
t_start_training = time.time()
smooth_train_loss = 0
total_training_time = 0
step = 0
while True:
torch.cuda.synchronize()
t0 = time.time()
for micro_step in range(grad_accum_steps):
with autocast_ctx:
loss = model(x, y)
train_loss = loss.detach()
loss = loss / grad_accum_steps
loss.backward()
x, y, epoch = next(train_loader)
# Progress and schedules
progress = min(total_training_time / TIME_BUDGET, 1.0)
lrm = get_lr_multiplier(progress)
muon_momentum = get_muon_momentum(step)
muon_weight_decay = get_weight_decay(progress)
for group in optimizer.param_groups:
group["lr"] = group["initial_lr"] * lrm
if group['kind'] == 'muon':
group["momentum"] = muon_momentum
group["weight_decay"] = muon_weight_decay
optimizer.step()
model.zero_grad(set_to_none=True)
train_loss_f = train_loss.item()
# Fast fail: abort if loss is exploding or NaN
if math.isnan(train_loss_f) or train_loss_f > 100:
print("FAIL")
exit(1)
torch.cuda.synchronize()
t1 = time.time()
dt = t1 - t0
if step > 10:
total_training_time += dt
# Logging
ema_beta = 0.9
smooth_train_loss = ema_beta * smooth_train_loss + (1 - ema_beta) * train_loss_f
debiased_smooth_loss = smooth_train_loss / (1 - ema_beta**(step + 1))
pct_done = 100 * progress
tok_per_sec = int(TOTAL_BATCH_SIZE / dt)
mfu = 100 * num_flops_per_token * TOTAL_BATCH_SIZE / dt / H100_BF16_PEAK_FLOPS
remaining = max(0, TIME_BUDGET - total_training_time)
print(f"\rstep {step:05d} ({pct_done:.1f}%) | loss: {debiased_smooth_loss:.6f} | lrm: {lrm:.2f} | dt: {dt*1000:.0f}ms | tok/sec: {tok_per_sec:,} | mfu: {mfu:.1f}% | epoch: {epoch} | remaining: {remaining:.0f}s ", end="", flush=True)
# GC management (Python's GC causes ~500ms stalls)
if step == 0:
gc.collect()
gc.freeze()
gc.disable()
elif (step + 1) % 5000 == 0:
gc.collect()
step += 1
# Time's up — but only stop after warmup steps so we don't count compilation
if step > 10 and total_training_time >= TIME_BUDGET:
break
print() # newline after \r training log
total_tokens = step * TOTAL_BATCH_SIZE
# Final eval
model.eval()
with autocast_ctx:
val_bpb = evaluate_bpb(model, tokenizer, DEVICE_BATCH_SIZE)
# Final summary
t_end = time.time()
startup_time = t_start_training - t_start
steady_state_mfu = 100 * num_flops_per_token * TOTAL_BATCH_SIZE * (step - 10) / total_training_time / H100_BF16_PEAK_FLOPS if total_training_time > 0 else 0
peak_vram_mb = torch.cuda.max_memory_allocated() / 1024 / 1024
print("---")
print(f"val_bpb: {val_bpb:.6f}")
print(f"training_seconds: {total_training_time:.1f}")
print(f"total_seconds: {t_end - t_start:.1f}")
print(f"peak_vram_mb: {peak_vram_mb:.1f}")
print(f"mfu_percent: {steady_state_mfu:.2f}")
print(f"total_tokens_M: {total_tokens / 1e6:.1f}")
print(f"num_steps: {step}")
print(f"num_params_M: {num_params / 1e6:.1f}")
print(f"depth: {DEPTH}") | --- +++ @@ -1,3 +1,8 @@+"""
+Autoresearch pretraining script. Single-GPU, single-file.
+Cherry-picked and simplified from nanochat.
+Usage: uv run train.py
+"""
import os
os.environ["PYTORCH_ALLOC_CONF"] = "expandable_segments:True"
@@ -40,6 +45,7 @@
def has_ve(layer_idx, n_layer):
+ """Returns True if layer should have Value Embedding (alternating, last always included)."""
return layer_idx % 2 == (n_layer - 1) % 2
@@ -200,6 +206,7 @@ return window_sizes
def estimate_flops(self):
+ """Estimated FLOPs per token (forward + backward)."""
nparams = sum(p.numel() for p in self.parameters())
value_embeds_numel = sum(ve.weight.numel() for ve in self.value_embeds.values())
nparams_exclude = (self.transformer.wte.weight.numel() + value_embeds_numel +
@@ -347,6 +354,7 @@
class MuonAdamW(torch.optim.Optimizer):
+ """Combined optimizer: Muon for 2D matrix params, AdamW for others."""
def __init__(self, param_groups):
super().__init__(param_groups, defaults={})
@@ -619,4 +627,4 @@ print(f"total_tokens_M: {total_tokens / 1e6:.1f}")
print(f"num_steps: {step}")
print(f"num_params_M: {num_params / 1e6:.1f}")
-print(f"depth: {DEPTH}")+print(f"depth: {DEPTH}")
| https://raw.githubusercontent.com/karpathy/autoresearch/HEAD/train.py |
Add structured docstrings to improve clarity |
import os
import sys
import time
import math
import argparse
import pickle
from multiprocessing import Pool
import requests
import pyarrow.parquet as pq
import rustbpe
import tiktoken
import torch
# ---------------------------------------------------------------------------
# Constants (fixed, do not modify)
# ---------------------------------------------------------------------------
MAX_SEQ_LEN = 2048 # context length
TIME_BUDGET = 300 # training time budget in seconds (5 minutes)
EVAL_TOKENS = 40 * 524288 # number of tokens for val eval
# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------
CACHE_DIR = os.path.join(os.path.expanduser("~"), ".cache", "autoresearch")
DATA_DIR = os.path.join(CACHE_DIR, "data")
TOKENIZER_DIR = os.path.join(CACHE_DIR, "tokenizer")
BASE_URL = "https://huggingface.co/datasets/karpathy/climbmix-400b-shuffle/resolve/main"
MAX_SHARD = 6542 # the last datashard is shard_06542.parquet
VAL_SHARD = MAX_SHARD # pinned validation shard (shard_06542)
VAL_FILENAME = f"shard_{VAL_SHARD:05d}.parquet"
VOCAB_SIZE = 8192
# BPE split pattern (GPT-4 style, with \p{N}{1,2} instead of {1,3})
SPLIT_PATTERN = r"""'(?i:[sdmt]|ll|ve|re)|[^\r\n\p{L}\p{N}]?+\p{L}+|\p{N}{1,2}| ?[^\s\p{L}\p{N}]++[\r\n]*|\s*[\r\n]|\s+(?!\S)|\s+"""
SPECIAL_TOKENS = [f"<|reserved_{i}|>" for i in range(4)]
BOS_TOKEN = "<|reserved_0|>"
# ---------------------------------------------------------------------------
# Data download
# ---------------------------------------------------------------------------
def download_single_shard(index):
filename = f"shard_{index:05d}.parquet"
filepath = os.path.join(DATA_DIR, filename)
if os.path.exists(filepath):
return True
url = f"{BASE_URL}/{filename}"
max_attempts = 5
for attempt in range(1, max_attempts + 1):
try:
response = requests.get(url, stream=True, timeout=30)
response.raise_for_status()
temp_path = filepath + ".tmp"
with open(temp_path, "wb") as f:
for chunk in response.iter_content(chunk_size=1024 * 1024):
if chunk:
f.write(chunk)
os.rename(temp_path, filepath)
print(f" Downloaded {filename}")
return True
except (requests.RequestException, IOError) as e:
print(f" Attempt {attempt}/{max_attempts} failed for {filename}: {e}")
for path in [filepath + ".tmp", filepath]:
if os.path.exists(path):
try:
os.remove(path)
except OSError:
pass
if attempt < max_attempts:
time.sleep(2 ** attempt)
return False
def download_data(num_shards, download_workers=8):
os.makedirs(DATA_DIR, exist_ok=True)
num_train = min(num_shards, MAX_SHARD)
ids = list(range(num_train))
if VAL_SHARD not in ids:
ids.append(VAL_SHARD)
# Count what's already downloaded
existing = sum(1 for i in ids if os.path.exists(os.path.join(DATA_DIR, f"shard_{i:05d}.parquet")))
if existing == len(ids):
print(f"Data: all {len(ids)} shards already downloaded at {DATA_DIR}")
return
needed = len(ids) - existing
print(f"Data: downloading {needed} shards ({existing} already exist)...")
workers = max(1, min(download_workers, needed))
with Pool(processes=workers) as pool:
results = pool.map(download_single_shard, ids)
ok = sum(1 for r in results if r)
print(f"Data: {ok}/{len(ids)} shards ready at {DATA_DIR}")
# ---------------------------------------------------------------------------
# Tokenizer training
# ---------------------------------------------------------------------------
def list_parquet_files():
files = sorted(f for f in os.listdir(DATA_DIR) if f.endswith(".parquet") and not f.endswith(".tmp"))
return [os.path.join(DATA_DIR, f) for f in files]
def text_iterator(max_chars=1_000_000_000, doc_cap=10_000):
parquet_paths = [p for p in list_parquet_files() if not p.endswith(VAL_FILENAME)]
nchars = 0
for filepath in parquet_paths:
pf = pq.ParquetFile(filepath)
for rg_idx in range(pf.num_row_groups):
rg = pf.read_row_group(rg_idx)
for text in rg.column("text").to_pylist():
doc = text[:doc_cap] if len(text) > doc_cap else text
nchars += len(doc)
yield doc
if nchars >= max_chars:
return
def train_tokenizer():
tokenizer_pkl = os.path.join(TOKENIZER_DIR, "tokenizer.pkl")
token_bytes_path = os.path.join(TOKENIZER_DIR, "token_bytes.pt")
if os.path.exists(tokenizer_pkl) and os.path.exists(token_bytes_path):
print(f"Tokenizer: already trained at {TOKENIZER_DIR}")
return
os.makedirs(TOKENIZER_DIR, exist_ok=True)
parquet_files = list_parquet_files()
if len(parquet_files) < 2:
print("Tokenizer: need at least 2 data shards (1 train + 1 val). Download more data first.")
sys.exit(1)
# --- Train with rustbpe ---
print("Tokenizer: training BPE tokenizer...")
t0 = time.time()
tokenizer = rustbpe.Tokenizer()
vocab_size_no_special = VOCAB_SIZE - len(SPECIAL_TOKENS)
tokenizer.train_from_iterator(text_iterator(), vocab_size_no_special, pattern=SPLIT_PATTERN)
# Build tiktoken encoding from trained merges
pattern = tokenizer.get_pattern()
mergeable_ranks = {bytes(k): v for k, v in tokenizer.get_mergeable_ranks()}
tokens_offset = len(mergeable_ranks)
special_tokens = {name: tokens_offset + i for i, name in enumerate(SPECIAL_TOKENS)}
enc = tiktoken.Encoding(
name="rustbpe",
pat_str=pattern,
mergeable_ranks=mergeable_ranks,
special_tokens=special_tokens,
)
# Save tokenizer
with open(tokenizer_pkl, "wb") as f:
pickle.dump(enc, f)
t1 = time.time()
print(f"Tokenizer: trained in {t1 - t0:.1f}s, saved to {tokenizer_pkl}")
# --- Build token_bytes lookup for BPB evaluation ---
print("Tokenizer: building token_bytes lookup...")
special_set = set(SPECIAL_TOKENS)
token_bytes_list = []
for token_id in range(enc.n_vocab):
token_str = enc.decode([token_id])
if token_str in special_set:
token_bytes_list.append(0)
else:
token_bytes_list.append(len(token_str.encode("utf-8")))
token_bytes_tensor = torch.tensor(token_bytes_list, dtype=torch.int32)
torch.save(token_bytes_tensor, token_bytes_path)
print(f"Tokenizer: saved token_bytes to {token_bytes_path}")
# Sanity check
test = "Hello world! Numbers: 123. Unicode: 你好"
encoded = enc.encode_ordinary(test)
decoded = enc.decode(encoded)
assert decoded == test, f"Tokenizer roundtrip failed: {test!r} -> {decoded!r}"
print(f"Tokenizer: sanity check passed (vocab_size={enc.n_vocab})")
# ---------------------------------------------------------------------------
# Runtime utilities (imported by train.py)
# ---------------------------------------------------------------------------
class Tokenizer:
def __init__(self, enc):
self.enc = enc
self.bos_token_id = enc.encode_single_token(BOS_TOKEN)
@classmethod
def from_directory(cls, tokenizer_dir=TOKENIZER_DIR):
with open(os.path.join(tokenizer_dir, "tokenizer.pkl"), "rb") as f:
enc = pickle.load(f)
return cls(enc)
def get_vocab_size(self):
return self.enc.n_vocab
def get_bos_token_id(self):
return self.bos_token_id
def encode(self, text, prepend=None, num_threads=8):
if prepend is not None:
prepend_id = prepend if isinstance(prepend, int) else self.enc.encode_single_token(prepend)
if isinstance(text, str):
ids = self.enc.encode_ordinary(text)
if prepend is not None:
ids.insert(0, prepend_id)
elif isinstance(text, list):
ids = self.enc.encode_ordinary_batch(text, num_threads=num_threads)
if prepend is not None:
for row in ids:
row.insert(0, prepend_id)
else:
raise ValueError(f"Invalid input type: {type(text)}")
return ids
def decode(self, ids):
return self.enc.decode(ids)
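# Editor's hedged usage sketch: loading the trained tokenizer and batch-encoding documents
# with a BOS token prepended to each one, as make_dataloader below does.
#   tok = Tokenizer.from_directory()
#   ids = tok.encode(["hello world", "second doc"], prepend=BOS_TOKEN)
#   assert all(row[0] == tok.get_bos_token_id() for row in ids)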
def get_token_bytes(device="cpu"):
path = os.path.join(TOKENIZER_DIR, "token_bytes.pt")
with open(path, "rb") as f:
return torch.load(f, map_location=device)
def _document_batches(split, tokenizer_batch_size=128):
parquet_paths = list_parquet_files()
assert len(parquet_paths) > 0, "No parquet files found. Run prepare.py first."
val_path = os.path.join(DATA_DIR, VAL_FILENAME)
if split == "train":
parquet_paths = [p for p in parquet_paths if p != val_path]
assert len(parquet_paths) > 0, "No training shards found."
else:
parquet_paths = [val_path]
epoch = 1
while True:
for filepath in parquet_paths:
pf = pq.ParquetFile(filepath)
for rg_idx in range(pf.num_row_groups):
rg = pf.read_row_group(rg_idx)
batch = rg.column('text').to_pylist()
for i in range(0, len(batch), tokenizer_batch_size):
yield batch[i:i+tokenizer_batch_size], epoch
epoch += 1
def make_dataloader(tokenizer, B, T, split, buffer_size=1000):
assert split in ["train", "val"]
row_capacity = T + 1
batches = _document_batches(split)
bos_token = tokenizer.get_bos_token_id()
doc_buffer = []
epoch = 1
def refill_buffer():
nonlocal epoch
doc_batch, epoch = next(batches)
token_lists = tokenizer.encode(doc_batch, prepend=bos_token)
doc_buffer.extend(token_lists)
# Pre-allocate buffers: [inputs (B*T) | targets (B*T)]
row_buffer = torch.empty((B, row_capacity), dtype=torch.long)
cpu_buffer = torch.empty(2 * B * T, dtype=torch.long, pin_memory=True)
gpu_buffer = torch.empty(2 * B * T, dtype=torch.long, device="cuda")
cpu_inputs = cpu_buffer[:B * T].view(B, T)
cpu_targets = cpu_buffer[B * T:].view(B, T)
inputs = gpu_buffer[:B * T].view(B, T)
targets = gpu_buffer[B * T:].view(B, T)
while True:
for row_idx in range(B):
pos = 0
while pos < row_capacity:
while len(doc_buffer) < buffer_size:
refill_buffer()
remaining = row_capacity - pos
# Find largest doc that fits entirely
best_idx = -1
best_len = 0
for i, doc in enumerate(doc_buffer):
doc_len = len(doc)
if doc_len <= remaining and doc_len > best_len:
best_idx = i
best_len = doc_len
if best_idx >= 0:
doc = doc_buffer.pop(best_idx)
row_buffer[row_idx, pos:pos + len(doc)] = torch.tensor(doc, dtype=torch.long)
pos += len(doc)
else:
# No doc fits — crop shortest to fill remaining
shortest_idx = min(range(len(doc_buffer)), key=lambda i: len(doc_buffer[i]))
doc = doc_buffer.pop(shortest_idx)
row_buffer[row_idx, pos:pos + remaining] = torch.tensor(doc[:remaining], dtype=torch.long)
pos += remaining
cpu_inputs.copy_(row_buffer[:, :-1])
cpu_targets.copy_(row_buffer[:, 1:])
gpu_buffer.copy_(cpu_buffer, non_blocking=True)
yield inputs, targets, epoch
# ---------------------------------------------------------------------------
# Evaluation (DO NOT CHANGE — this is the fixed metric)
# ---------------------------------------------------------------------------
@torch.no_grad()
def evaluate_bpb(model, tokenizer, batch_size):
token_bytes = get_token_bytes(device="cuda")
val_loader = make_dataloader(tokenizer, batch_size, MAX_SEQ_LEN, "val")
steps = EVAL_TOKENS // (batch_size * MAX_SEQ_LEN)
total_nats = 0.0
total_bytes = 0
for _ in range(steps):
x, y, _ = next(val_loader)
loss_flat = model(x, y, reduction='none').view(-1)
y_flat = y.view(-1)
nbytes = token_bytes[y_flat]
mask = nbytes > 0
total_nats += (loss_flat * mask).sum().item()
total_bytes += nbytes.sum().item()
return total_nats / (math.log(2) * total_bytes)
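# Editor's note (hedged worked example): bits-per-byte converts the summed cross-entropy
# from nats to bits and divides by the number of UTF-8 bytes in the targets, which makes
# the metric comparable across tokenizers with different vocab sizes. For instance, an
# average loss of ln(4) ~= 1.386 nats/token on tokens averaging 4 bytes gives
# 1.386 / (ln(2) * 4) ~= 0.5 bits per byte.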
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Prepare data and tokenizer for autoresearch")
parser.add_argument("--num-shards", type=int, default=10, help="Number of training shards to download (-1 = all). Val shard is always pinned.")
parser.add_argument("--download-workers", type=int, default=8, help="Number of parallel download workers")
args = parser.parse_args()
num_shards = MAX_SHARD if args.num_shards == -1 else args.num_shards
print(f"Cache directory: {CACHE_DIR}")
print()
# Step 1: Download data
download_data(num_shards, download_workers=args.download_workers)
print()
# Step 2: Train tokenizer
train_tokenizer()
print()
print("Done! Ready to train.") | --- +++ @@ -1,3 +1,13 @@+"""
+One-time data preparation for autoresearch experiments.
+Downloads data shards and trains a BPE tokenizer.
+
+Usage:
+ python prepare.py # full prep (download + tokenizer)
+ python prepare.py --num-shards 8 # download only 8 shards (for testing)
+
+Data and tokenizer are stored in ~/.cache/autoresearch/.
+"""
import os
import sys
@@ -45,6 +55,7 @@ # ---------------------------------------------------------------------------
def download_single_shard(index):
+ """Download one parquet shard with retries. Returns True on success."""
filename = f"shard_{index:05d}.parquet"
filepath = os.path.join(DATA_DIR, filename)
if os.path.exists(filepath):
@@ -78,6 +89,7 @@
def download_data(num_shards, download_workers=8):
+ """Download training shards + pinned validation shard."""
os.makedirs(DATA_DIR, exist_ok=True)
num_train = min(num_shards, MAX_SHARD)
ids = list(range(num_train))
@@ -105,11 +117,13 @@ # ---------------------------------------------------------------------------
def list_parquet_files():
+ """Return sorted list of parquet file paths in the data directory."""
files = sorted(f for f in os.listdir(DATA_DIR) if f.endswith(".parquet") and not f.endswith(".tmp"))
return [os.path.join(DATA_DIR, f) for f in files]
def text_iterator(max_chars=1_000_000_000, doc_cap=10_000):
+ """Yield documents from training split (all shards except pinned val shard)."""
parquet_paths = [p for p in list_parquet_files() if not p.endswith(VAL_FILENAME)]
nchars = 0
for filepath in parquet_paths:
@@ -125,6 +139,7 @@
def train_tokenizer():
+ """Train BPE tokenizer using rustbpe, save as tiktoken pickle."""
tokenizer_pkl = os.path.join(TOKENIZER_DIR, "tokenizer.pkl")
token_bytes_path = os.path.join(TOKENIZER_DIR, "token_bytes.pt")
@@ -192,6 +207,7 @@ # ---------------------------------------------------------------------------
class Tokenizer:
+ """Minimal tokenizer wrapper. Training is handled above."""
def __init__(self, enc):
self.enc = enc
@@ -236,6 +252,7 @@
def _document_batches(split, tokenizer_batch_size=128):
+ """Infinite iterator over document batches from parquet files."""
parquet_paths = list_parquet_files()
assert len(parquet_paths) > 0, "No parquet files found. Run prepare.py first."
val_path = os.path.join(DATA_DIR, VAL_FILENAME)
@@ -257,6 +274,12 @@
def make_dataloader(tokenizer, B, T, split, buffer_size=1000):
+ """
+ BOS-aligned dataloader with best-fit packing.
+ Every row starts with BOS. Documents packed using best-fit to minimize cropping.
+ When no document fits remaining space, crops shortest doc to fill exactly.
+ 100% utilization (no padding).
+ """
assert split in ["train", "val"]
row_capacity = T + 1
batches = _document_batches(split)
@@ -319,6 +342,13 @@
@torch.no_grad()
def evaluate_bpb(model, tokenizer, batch_size):
+ """
+ Bits per byte (BPB): vocab size-independent evaluation metric.
+ Sums per-token cross-entropy (in nats), sums target byte lengths,
+ then converts nats/byte to bits/byte. Special tokens (byte length 0)
+ are excluded from both sums.
+ Uses fixed MAX_SEQ_LEN so results are comparable across configs.
+ """
token_bytes = get_token_bytes(device="cuda")
val_loader = make_dataloader(tokenizer, batch_size, MAX_SEQ_LEN, "val")
steps = EVAL_TOKENS // (batch_size * MAX_SEQ_LEN)
@@ -356,4 +386,4 @@ # Step 2: Train tokenizer
train_tokenizer()
print()
- print("Done! Ready to train.")+ print("Done! Ready to train.")
| https://raw.githubusercontent.com/karpathy/autoresearch/HEAD/prepare.py |
Write beginner-friendly docstrings |
import ast
import sys
import warnings
from collections.abc import Collection, Iterator
from black.mode import VERSION_TO_FEATURES, Feature, TargetVersion, supports_feature
from black.nodes import syms
from blib2to3 import pygram
from blib2to3.pgen2 import driver
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pgen2.parse import ParseError
from blib2to3.pgen2.tokenize import TokenError
from blib2to3.pytree import Leaf, Node
class InvalidInput(ValueError):
def get_grammars(target_versions: set[TargetVersion]) -> list[Grammar]:
if not target_versions:
# No target_version specified, so try all grammars.
return [
# Python 3.7-3.9
pygram.python_grammar_async_keywords,
# Python 3.0-3.6
pygram.python_grammar,
# Python 3.10+
pygram.python_grammar_soft_keywords,
]
grammars = []
# If we have to parse both, try to parse async as a keyword first
if not supports_feature(
target_versions, Feature.ASYNC_IDENTIFIERS
) and not supports_feature(target_versions, Feature.PATTERN_MATCHING):
# Python 3.7-3.9
grammars.append(pygram.python_grammar_async_keywords)
if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
# Python 3.0-3.6
grammars.append(pygram.python_grammar)
if any(Feature.PATTERN_MATCHING in VERSION_TO_FEATURES[v] for v in target_versions):
# Python 3.10+
grammars.append(pygram.python_grammar_soft_keywords)
# At least one of the above branches must have been taken, because every Python
# version has exactly one of the two 'ASYNC_*' flags
return grammars
def lib2to3_parse(
src_txt: str, target_versions: Collection[TargetVersion] = ()
) -> Node:
if not src_txt.endswith("\n"):
src_txt += "\n"
grammars = get_grammars(set(target_versions))
if target_versions:
max_tv = max(target_versions, key=lambda tv: tv.value)
tv_str = f" for target version {max_tv.pretty()}"
else:
tv_str = ""
errors = {}
for grammar in grammars:
drv = driver.Driver(grammar)
try:
result = drv.parse_string(src_txt, False)
break
except ParseError as pe:
lineno, column = pe.context[1]
lines = src_txt.splitlines()
try:
faulty_line = lines[lineno - 1]
except IndexError:
faulty_line = "<line number missing in source>"
errors[grammar.version] = InvalidInput(
f"Cannot parse{tv_str}: {lineno}:{column}: {faulty_line}"
)
except TokenError as te:
# In edge cases these are raised; and typically don't have a "faulty_line".
lineno, column = te.args[1]
errors[grammar.version] = InvalidInput(
f"Cannot parse{tv_str}: {lineno}:{column}: {te.args[0]}"
)
else:
# Choose the latest version when raising the actual parsing error.
assert len(errors) >= 1
exc = errors[max(errors)]
raise exc from None
if isinstance(result, Leaf):
result = Node(syms.file_input, [result])
return result
class ASTSafetyError(Exception):
def _parse_single_version(
src: str, version: tuple[int, int], *, type_comments: bool
) -> ast.AST:
filename = "<unknown>"
with warnings.catch_warnings():
warnings.simplefilter("ignore", SyntaxWarning)
warnings.simplefilter("ignore", DeprecationWarning)
return ast.parse(
src, filename, feature_version=version, type_comments=type_comments
)
def parse_ast(src: str) -> ast.AST:
# TODO: support Python 4+ ;)
versions = [(3, minor) for minor in range(3, sys.version_info[1] + 1)]
first_error = ""
for version in sorted(versions, reverse=True):
try:
return _parse_single_version(src, version, type_comments=True)
except SyntaxError as e:
if not first_error:
first_error = str(e)
# Try to parse without type comments
for version in sorted(versions, reverse=True):
try:
return _parse_single_version(src, version, type_comments=False)
except SyntaxError:
pass
raise SyntaxError(first_error)
def _normalize(lineend: str, value: str) -> str:
# To normalize, we strip any leading and trailing space from
# each line...
stripped: list[str] = [i.strip() for i in value.splitlines()]
normalized = lineend.join(stripped)
# ...and remove any blank lines at the beginning and end of
# the whole string
return normalized.strip()
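# Editor's hedged example: _normalize("\n", "  first line \n    second  ") returns
# "first line\nsecond": leading/trailing spaces on each line and blank lines around the
# whole string are stripped, so re-indented docstrings still compare as equal.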
def stringify_ast(node: ast.AST) -> Iterator[str]:
return _stringify_ast(node, [])
def _stringify_ast_with_new_parent(
node: ast.AST, parent_stack: list[ast.AST], new_parent: ast.AST
) -> Iterator[str]:
parent_stack.append(new_parent)
yield from _stringify_ast(node, parent_stack)
parent_stack.pop()
def _stringify_ast(node: ast.AST, parent_stack: list[ast.AST]) -> Iterator[str]:
if (
isinstance(node, ast.Constant)
and isinstance(node.value, str)
and node.kind == "u"
):
# It's a quirk of history that we strip the u prefix over here. We used to
# rewrite the AST nodes for Python version compatibility and we never copied
# over the kind
node.kind = None
yield f"{' ' * len(parent_stack)}{node.__class__.__name__}("
for field in sorted(node._fields):
# TypeIgnore has only one field 'lineno' which breaks this comparison
if isinstance(node, ast.TypeIgnore):
break
try:
value: object = getattr(node, field)
except AttributeError:
continue
yield f"{' ' * (len(parent_stack) + 1)}{field}="
if isinstance(value, list):
for item in value:
# Ignore nested tuples within del statements, because we may insert
# parentheses and they change the AST.
if (
field == "targets"
and isinstance(node, ast.Delete)
and isinstance(item, ast.Tuple)
):
for elt in _unwrap_tuples(item):
yield from _stringify_ast_with_new_parent(
elt, parent_stack, node
)
elif isinstance(item, ast.AST):
yield from _stringify_ast_with_new_parent(item, parent_stack, node)
elif isinstance(value, ast.AST):
yield from _stringify_ast_with_new_parent(value, parent_stack, node)
else:
normalized: object
if (
isinstance(node, ast.Constant)
and field == "value"
and isinstance(value, str)
and len(parent_stack) >= 2
# Any standalone string, ideally this would
# exactly match black.nodes.is_docstring
and isinstance(parent_stack[-1], ast.Expr)
):
# Constant strings may be indented across newlines, if they are
# docstrings; fold spaces after newlines when comparing. Similarly,
# trailing and leading space may be removed.
normalized = _normalize("\n", value)
elif field == "type_comment" and isinstance(value, str):
# Trailing whitespace in type comments is removed.
normalized = value.rstrip()
else:
normalized = value
yield (
f"{' ' * (len(parent_stack) + 1)}{normalized!r}, #"
f" {value.__class__.__name__}"
)
yield f"{' ' * len(parent_stack)}) # /{node.__class__.__name__}"
def _unwrap_tuples(node: ast.Tuple) -> Iterator[ast.AST]:
for elt in node.elts:
if isinstance(elt, ast.Tuple):
yield from _unwrap_tuples(elt)
else:
yield elt | --- +++ @@ -1,3 +1,6 @@+"""
+Parse Python code and perform AST validation.
+"""
import ast
import sys
@@ -15,6 +18,7 @@
class InvalidInput(ValueError):
+ """Raised when input source code fails all parse attempts."""
def get_grammars(target_versions: set[TargetVersion]) -> list[Grammar]:
@@ -51,6 +55,7 @@ def lib2to3_parse(
src_txt: str, target_versions: Collection[TargetVersion] = ()
) -> Node:
+ """Given a string with source, return the lib2to3 Node."""
if not src_txt.endswith("\n"):
src_txt += "\n"
@@ -98,6 +103,7 @@
class ASTSafetyError(Exception):
+ """Raised when Black's generated code is not equivalent to the old AST."""
def _parse_single_version(
@@ -145,6 +151,7 @@
def stringify_ast(node: ast.AST) -> Iterator[str]:
+ """Simple visitor generating strings to compare ASTs by content."""
return _stringify_ast(node, [])
@@ -234,4 +241,4 @@ if isinstance(elt, ast.Tuple):
yield from _unwrap_tuples(elt)
else:
    - yield elt
    + yield elt
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/parsing.py |
Document this code for team use | #!/usr/bin/env python3
from __future__ import annotations
import argparse
import logging
import re
import sys
from datetime import datetime
from pathlib import Path
from subprocess import run
LOG = logging.getLogger(__name__)
NEW_VERSION_CHANGELOG_TEMPLATE = """\
## Unreleased
<!-- PR authors:
Please include the PR number in the changelog entry, not the issue number -->
### Highlights
<!-- Include any especially major or disruptive changes here -->
### Stable style
<!-- Changes that affect Black's stable style -->
### Preview style
<!-- Changes that affect Black's preview style -->
### Configuration
<!-- Changes to how Black can be configured -->
### Packaging
<!-- Changes to how Black is packaged, such as dependency requirements -->
### Parser
<!-- Changes to the parser or to version autodetection -->
### Performance
<!-- Changes that improve Black's performance. -->
### Output
<!-- Changes to Black's terminal output and error messages -->
### _Blackd_
<!-- Changes to blackd -->
### Integrations
<!-- For example, Docker, GitHub Actions, pre-commit, editors -->
### Documentation
<!-- Major changes to documentation and policies. Small docs changes
don't need a changelog entry. -->
"""
class NoGitTagsError(Exception): ...
# TODO: Do better with alpha + beta releases
# Maybe we vendor packaging library
def get_git_tags(versions_only: bool = True) -> list[str]:
cp = run(["git", "tag"], capture_output=True, check=True, encoding="utf8")
if not cp.stdout:
LOG.error(f"Returned no git tags stdout: {cp.stderr}")
raise NoGitTagsError
git_tags = cp.stdout.splitlines()
if versions_only:
return [t for t in git_tags if t[0].isdigit()]
return git_tags
# TODO: Support sorting alpha/beta releases correctly
def tuple_calver(calver: str) -> tuple[int, ...]: # mypy can't notice maxsplit below
try:
return tuple(map(int, calver.split(".", maxsplit=2)))
except ValueError:
return (0, 0, 0)
class SourceFiles:
def __init__(self, black_repo_dir: Path):
        # File path fun: all pathlib to be platform agnostic
self.black_repo_path = black_repo_dir
self.changes_path = self.black_repo_path / "CHANGES.md"
self.docs_path = self.black_repo_path / "docs"
self.version_doc_paths = (
self.docs_path / "integrations" / "source_version_control.md",
self.docs_path / "usage_and_configuration" / "the_basics.md",
self.docs_path / "guides" / "using_black_with_jupyter_notebooks.md",
)
self.current_version = self.get_current_version()
self.next_version = self.get_next_version()
def __str__(self) -> str:
return f"""\
> SourceFiles ENV:
Repo path: {self.black_repo_path}
CHANGES.md path: {self.changes_path}
docs path: {self.docs_path}
Current version: {self.current_version}
Next version: {self.next_version}
"""
def add_template_to_changes(self) -> int:
LOG.info(f"Adding template to {self.changes_path}")
with self.changes_path.open("r", encoding="utf-8") as cfp:
changes_string = cfp.read()
if "## Unreleased" in changes_string:
LOG.error(f"{self.changes_path} already has unreleased template")
return 1
templated_changes_string = changes_string.replace(
"# Change Log\n",
f"# Change Log\n\n{NEW_VERSION_CHANGELOG_TEMPLATE}",
)
with self.changes_path.open("w", encoding="utf-8") as cfp:
cfp.write(templated_changes_string)
LOG.info(f"Added template to {self.changes_path}")
return 0
def cleanup_changes_template_for_release(self) -> None:
LOG.info(f"Cleaning up {self.changes_path}")
with self.changes_path.open("r", encoding="utf-8") as cfp:
changes_string = cfp.read()
# Change Unreleased to next version
changes_string = changes_string.replace(
"## Unreleased", f"## {self.next_version}"
)
# Remove all comments
changes_string = re.sub(r"(?m)^<!--(?>(?:.|\n)*?-->)\n\n", "", changes_string)
# Remove empty subheadings
changes_string = re.sub(r"(?m)^###.+\n\n(?=#)", "", changes_string)
with self.changes_path.open("w", encoding="utf-8") as cfp:
cfp.write(changes_string)
LOG.debug(f"Finished Cleaning up {self.changes_path}")
def get_current_version(self) -> str:
return sorted(get_git_tags(), key=lambda k: tuple_calver(k))[-1]
def get_next_version(self) -> str:
base_calver = datetime.today().strftime("%y.%m")
calver_parts = base_calver.split(".")
base_calver = f"{calver_parts[0]}.{int(calver_parts[1])}" # Remove leading 0
git_tags = get_git_tags()
same_month_releases = [
t for t in git_tags if t.startswith(base_calver) and "a" not in t
]
if len(same_month_releases) < 1:
return f"{base_calver}.0"
same_month_version = same_month_releases[-1].split(".", 2)[-1]
return f"{base_calver}.{int(same_month_version) + 1}"
def update_repo_for_release(self) -> int:
self.cleanup_changes_template_for_release()
self.update_version_in_docs()
return 0 # return 0 if no exceptions hit
def update_version_in_docs(self) -> None:
for doc_path in self.version_doc_paths:
LOG.info(f"Updating black version to {self.next_version} in {doc_path}")
with doc_path.open("r", encoding="utf-8") as dfp:
doc_string = dfp.read()
next_version_doc = doc_string.replace(
self.current_version, self.next_version
)
with doc_path.open("w", encoding="utf-8") as dfp:
dfp.write(next_version_doc)
LOG.debug(
f"Finished updating black version to {self.next_version} in {doc_path}"
)
def _handle_debug(debug: bool) -> None:
log_level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(
format="[%(asctime)s] %(levelname)s: %(message)s (%(filename)s:%(lineno)d)",
level=log_level,
)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument(
"-a",
"--add-changes-template",
action="store_true",
help="Add the Unreleased template to CHANGES.md",
)
parser.add_argument(
"-d", "--debug", action="store_true", help="Verbose debug output"
)
args = parser.parse_args()
_handle_debug(args.debug)
return args
def main() -> int:
args = parse_args()
# Need parent.parent cause script is in scripts/ directory
sf = SourceFiles(Path(__file__).parent.parent)
if args.add_changes_template:
return sf.add_template_to_changes()
LOG.info(f"Current version detected to be {sf.current_version}")
LOG.info(f"Next version will be {sf.next_version}")
return sf.update_repo_for_release()
if __name__ == "__main__": # pragma: no cover
sys.exit(main()) | --- +++ @@ -1,5 +1,8 @@ #!/usr/bin/env python3
+"""
+Tool to help automate changes needed in commits during and after releases
+"""
from __future__ import annotations
@@ -71,6 +74,7 @@ # TODO: Do better with alpha + beta releases
# Maybe we vendor packaging library
def get_git_tags(versions_only: bool = True) -> list[str]:
+ """Pull out all tags or calvers only"""
cp = run(["git", "tag"], capture_output=True, check=True, encoding="utf8")
if not cp.stdout:
LOG.error(f"Returned no git tags stdout: {cp.stderr}")
@@ -83,6 +87,7 @@
# TODO: Support sorting alhpa/beta releases correctly
def tuple_calver(calver: str) -> tuple[int, ...]: # mypy can't notice maxsplit below
+ """Convert a calver string into a tuple of ints for sorting"""
try:
return tuple(map(int, calver.split(".", maxsplit=2)))
except ValueError:
@@ -114,6 +119,7 @@ """
def add_template_to_changes(self) -> int:
+ """Add the template to CHANGES.md if it does not exist"""
LOG.info(f"Adding template to {self.changes_path}")
with self.changes_path.open("r", encoding="utf-8") as cfp:
@@ -157,9 +163,11 @@ LOG.debug(f"Finished Cleaning up {self.changes_path}")
def get_current_version(self) -> str:
+ """Get the latest git (version) tag as latest version"""
return sorted(get_git_tags(), key=lambda k: tuple_calver(k))[-1]
def get_next_version(self) -> str:
+ """Workout the year and month + version number we need to move to"""
base_calver = datetime.today().strftime("%y.%m")
calver_parts = base_calver.split(".")
base_calver = f"{calver_parts[0]}.{int(calver_parts[1])}" # Remove leading 0
@@ -173,6 +181,7 @@ return f"{base_calver}.{int(same_month_version) + 1}"
def update_repo_for_release(self) -> int:
+ """Update CHANGES.md + doc files ready for release"""
self.cleanup_changes_template_for_release()
self.update_version_in_docs()
return 0 # return 0 if no exceptions hit
@@ -197,6 +206,7 @@
def _handle_debug(debug: bool) -> None:
+ """Turn on debugging if asked otherwise INFO default"""
log_level = logging.DEBUG if debug else logging.INFO
logging.basicConfig(
format="[%(asctime)s] %(levelname)s: %(message)s (%(filename)s:%(lineno)d)",
@@ -235,4 +245,4 @@
if __name__ == "__main__": # pragma: no cover
    - sys.exit(main())
    +    sys.exit(main())
| https://raw.githubusercontent.com/psf/black/HEAD/scripts/release.py |
Create docstrings for reusable components |
import re
import sys
from functools import lru_cache
from re import Match, Pattern
from typing import Final
from black._width_table import WIDTH_TABLE
from blib2to3.pytree import Leaf
STRING_PREFIX_CHARS: Final = "fturbFTURB" # All possible string prefix characters.
STRING_PREFIX_RE: Final = re.compile(
r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", re.DOTALL
)
UNICODE_ESCAPE_RE: Final = re.compile(
r"(?P<backslashes>\\+)(?P<body>"
r"(u(?P<u>[a-fA-F0-9]{4}))" # Character with 16-bit hex value xxxx
r"|(U(?P<U>[a-fA-F0-9]{8}))" # Character with 32-bit hex value xxxxxxxx
r"|(x(?P<x>[a-fA-F0-9]{2}))" # Character with hex value hh
r"|(N\{(?P<N>[a-zA-Z0-9 \-]{2,})\})" # Character named name in the Unicode database
r")",
re.VERBOSE,
)
def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
return regex.sub(replacement, regex.sub(replacement, original))
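# Editor's toy example (hedged, not from the original source): a single re.sub pass can
# leave a new match that only appears after the first replacement, which is why the
# normalization code below applies each substitution twice:
#   re.sub("ab", "b", "aab")                  -> "ab"
#   sub_twice(re.compile("ab"), "b", "aab")   -> "b"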
def has_triple_quotes(string: str) -> bool:
raw_string = string.lstrip(STRING_PREFIX_CHARS)
return raw_string[:3] in {'"""', "'''"}
def lines_with_leading_tabs_expanded(s: str) -> list[str]:
lines = []
for line in s.splitlines():
stripped_line = line.lstrip()
if not stripped_line or stripped_line == line:
lines.append(line)
else:
prefix_length = len(line) - len(stripped_line)
prefix = line[:prefix_length].expandtabs()
lines.append(prefix + stripped_line)
if s.endswith("\n"):
lines.append("")
return lines
def fix_multiline_docstring(docstring: str, prefix: str) -> str:
# https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
assert docstring, "INTERNAL ERROR: Multiline docstrings cannot be empty"
lines = lines_with_leading_tabs_expanded(docstring)
# Determine minimum indentation (first line doesn't count):
indent = sys.maxsize
for line in lines[1:]:
stripped = line.lstrip()
if stripped:
indent = min(indent, len(line) - len(stripped))
# Remove indentation (first line is special):
trimmed = [lines[0].strip()]
if indent < sys.maxsize:
last_line_idx = len(lines) - 2
for i, line in enumerate(lines[1:]):
stripped_line = line[indent:].rstrip()
if stripped_line or i == last_line_idx:
trimmed.append(prefix + stripped_line)
else:
trimmed.append("")
return "\n".join(trimmed)
def get_string_prefix(string: str) -> str:
assert_is_leaf_string(string)
prefix = []
for char in string:
if char in STRING_PREFIX_CHARS:
prefix.append(char)
else:
break
return "".join(prefix)
def assert_is_leaf_string(string: str) -> None:
dquote_idx = string.find('"')
squote_idx = string.find("'")
if -1 in [dquote_idx, squote_idx]:
quote_idx = max(dquote_idx, squote_idx)
else:
quote_idx = min(squote_idx, dquote_idx)
assert (
0 <= quote_idx < len(string) - 1
), f"{string!r} is missing a starting quote character (' or \")."
assert string[-1] in (
"'",
'"',
), f"{string!r} is missing an ending quote character (' or \")."
assert set(string[:quote_idx]).issubset(
set(STRING_PREFIX_CHARS)
), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}."
def normalize_string_prefix(s: str) -> str:
match = STRING_PREFIX_RE.match(s)
assert match is not None, f"failed to match string {s!r}"
orig_prefix = match.group(1)
new_prefix = (
orig_prefix.replace("F", "f")
.replace("B", "b")
.replace("U", "")
.replace("u", "")
)
# Python syntax guarantees max 2 prefixes and that one of them is "r"
if len(new_prefix) == 2 and new_prefix[0].lower() != "r":
new_prefix = new_prefix[::-1]
return f"{new_prefix}{match.group(2)}"
# Re(gex) does actually cache patterns internally but this still improves
# performance on a long list literal of strings by 5-9% since lru_cache's
# caching overhead is much lower.
@lru_cache(maxsize=64)
def _cached_compile(pattern: str) -> Pattern[str]:
return re.compile(pattern)
def normalize_string_quotes(s: str) -> str:
value = s.lstrip(STRING_PREFIX_CHARS)
if value[:3] == '"""':
return s
elif value[:3] == "'''":
orig_quote = "'''"
new_quote = '"""'
elif value[0] == '"':
orig_quote = '"'
new_quote = "'"
else:
orig_quote = "'"
new_quote = '"'
first_quote_pos = s.find(orig_quote)
assert first_quote_pos != -1, f"INTERNAL ERROR: Malformed string {s!r}"
prefix = s[:first_quote_pos]
unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
escaped_new_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
escaped_orig_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}")
body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)]
if "r" in prefix.casefold():
if unescaped_new_quote.search(body):
# There's at least one unescaped new_quote in this raw string
# so converting is impossible
return s
# Do not introduce or remove backslashes in raw strings
new_body = body
else:
# remove unnecessary escapes
new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
if body != new_body:
# Consider the string without unnecessary escapes as the original
body = new_body
s = f"{prefix}{orig_quote}{body}{orig_quote}"
new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)
if "f" in prefix.casefold():
matches = re.findall(
r"""
(?:(?<!\{)|^)\{ # start of the string or a non-{ followed by a single {
([^{].*?) # contents of the brackets except if begins with {{
\}(?:(?!\})|$) # A } followed by end of the string or a non-}
""",
new_body,
re.VERBOSE,
)
for m in matches:
if "\\" in str(m):
# Do not introduce backslashes in interpolated expressions
return s
if new_quote == '"""' and new_body[-1:] == '"':
# edge case:
new_body = new_body[:-1] + '\\"'
orig_escape_count = body.count("\\")
new_escape_count = new_body.count("\\")
if new_escape_count > orig_escape_count:
return s # Do not introduce more escaping
if new_escape_count == orig_escape_count and orig_quote == '"':
return s # Prefer double quotes
return f"{prefix}{new_quote}{new_body}{new_quote}"
def normalize_fstring_quotes(
quote: str,
middles: list[Leaf],
is_raw_fstring: bool,
) -> tuple[list[Leaf], str]:
if quote == '"""':
return middles, quote
elif quote == "'''":
new_quote = '"""'
elif quote == '"':
new_quote = "'"
else:
new_quote = '"'
unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
escaped_new_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
escaped_orig_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){quote}")
if is_raw_fstring:
for middle in middles:
if unescaped_new_quote.search(middle.value):
# There's at least one unescaped new_quote in this raw string
# so converting is impossible
return middles, quote
# Do not introduce or remove backslashes in raw strings, just use double quote
return middles, '"'
new_segments = []
for middle in middles:
segment = middle.value
# remove unnecessary escapes
new_segment = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", segment)
if segment != new_segment:
# Consider the string without unnecessary escapes as the original
middle.value = new_segment
new_segment = sub_twice(escaped_orig_quote, rf"\1\2{quote}", new_segment)
new_segment = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_segment)
new_segments.append(new_segment)
if new_quote == '"""' and new_segments[-1].endswith('"'):
# edge case:
new_segments[-1] = new_segments[-1][:-1] + '\\"'
orig_escape_count = 0
new_escape_count = 0
for middle, new_segment in zip(middles, new_segments, strict=True):
orig_escape_count += middle.value.count("\\")
new_escape_count += new_segment.count("\\")
if new_escape_count > orig_escape_count:
return middles, quote # Do not introduce more escaping
if new_escape_count == orig_escape_count and quote == '"':
return middles, quote # Prefer double quotes
for middle, new_segment in zip(middles, new_segments, strict=True):
middle.value = new_segment
return middles, new_quote
def normalize_unicode_escape_sequences(leaf: Leaf) -> None:
text = leaf.value
prefix = get_string_prefix(text)
if "r" in prefix.lower():
return
def replace(m: Match[str]) -> str:
groups = m.groupdict()
back_slashes = groups["backslashes"]
if len(back_slashes) % 2 == 0:
return back_slashes + groups["body"]
if groups["u"]:
# \u
return back_slashes + "u" + groups["u"].lower()
elif groups["U"]:
# \U
return back_slashes + "U" + groups["U"].lower()
elif groups["x"]:
# \x
return back_slashes + "x" + groups["x"].lower()
else:
assert groups["N"], f"Unexpected match: {m}"
# \N{}
return back_slashes + "N{" + groups["N"].upper() + "}"
leaf.value = re.sub(UNICODE_ESCAPE_RE, replace, text)
@lru_cache(maxsize=4096)
def char_width(char: str) -> int:
table = WIDTH_TABLE
codepoint = ord(char)
highest = len(table) - 1
lowest = 0
idx = highest // 2
while True:
start_codepoint, end_codepoint, width = table[idx]
if codepoint < start_codepoint:
highest = idx - 1
elif codepoint > end_codepoint:
lowest = idx + 1
else:
return 0 if width < 0 else width
if highest < lowest:
break
idx = (highest + lowest) // 2
return 1
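# Editor's hedged examples (assuming the bundled width table marks CJK ideographs as wide):
#   char_width("A") == 1
#   char_width("你") == 2
# so str_width("你好") == 4, while str_width("abcd") == 4 via the ASCII fast path below.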
def str_width(line_str: str) -> int:
if line_str.isascii():
# Fast path for a line consisting of only ASCII characters
return len(line_str)
return sum(map(char_width, line_str))
def count_chars_in_width(line_str: str, max_width: int) -> int:
total_width = 0
for i, char in enumerate(line_str):
width = char_width(char)
if width + total_width > max_width:
return i
total_width += width
return len(line_str) | --- +++ @@ -1,3 +1,6 @@+"""
+Simple formatting on strings. Further string formatting code is in trans.py.
+"""
import re
import sys
@@ -24,15 +27,28 @@
def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
+ """Replace `regex` with `replacement` twice on `original`.
+
+ This is used by string normalization to perform replaces on
+ overlapping matches.
+ """
return regex.sub(replacement, regex.sub(replacement, original))
def has_triple_quotes(string: str) -> bool:
+ """
+ Returns:
+ True iff @string starts with three quotation characters.
+ """
raw_string = string.lstrip(STRING_PREFIX_CHARS)
return raw_string[:3] in {'"""', "'''"}
def lines_with_leading_tabs_expanded(s: str) -> list[str]:
+ """
+ Splits string into lines and expands only leading tabs (following the normal
+ Python rules)
+ """
lines = []
for line in s.splitlines():
stripped_line = line.lstrip()
@@ -71,6 +87,13 @@
def get_string_prefix(string: str) -> str:
+ """
+ Pre-conditions:
+ * assert_is_leaf_string(@string)
+
+ Returns:
+ @string's prefix (e.g. '', 'r', 'f', or 'rf').
+ """
assert_is_leaf_string(string)
prefix = []
@@ -83,6 +106,21 @@
def assert_is_leaf_string(string: str) -> None:
+ """
+ Checks the pre-condition that @string has the format that you would expect
+ of `leaf.value` where `leaf` is some Leaf such that `leaf.type ==
+ token.STRING`. A more precise description of the pre-conditions that are
+ checked are listed below.
+
+ Pre-conditions:
+ * @string starts with either ', ", <prefix>', or <prefix>" where
+ `set(<prefix>)` is some subset of `set(STRING_PREFIX_CHARS)`.
+ * @string ends with a quote character (' or ").
+
+ Raises:
+ AssertionError(...) if the pre-conditions listed above are not
+ satisfied.
+ """
dquote_idx = string.find('"')
squote_idx = string.find("'")
if -1 in [dquote_idx, squote_idx]:
@@ -103,6 +141,7 @@
def normalize_string_prefix(s: str) -> str:
+ """Make all string prefixes lowercase."""
match = STRING_PREFIX_RE.match(s)
assert match is not None, f"failed to match string {s!r}"
orig_prefix = match.group(1)
@@ -128,6 +167,10 @@
def normalize_string_quotes(s: str) -> str:
+ """Prefer double quotes but only if it doesn't cause more escaping.
+
+ Adds or removes backslashes as appropriate.
+ """
value = s.lstrip(STRING_PREFIX_CHARS)
if value[:3] == '"""':
return s
@@ -201,6 +244,10 @@ middles: list[Leaf],
is_raw_fstring: bool,
) -> tuple[list[Leaf], str]:
+ """Prefer double quotes but only if it doesn't cause more escaping.
+
+ Adds or removes backslashes as appropriate.
+ """
if quote == '"""':
return middles, quote
@@ -260,6 +307,7 @@
def normalize_unicode_escape_sequences(leaf: Leaf) -> None:
+ """Replace hex codes in Unicode escape sequences with lowercase representation."""
text = leaf.value
prefix = get_string_prefix(text)
if "r" in prefix.lower():
@@ -291,6 +339,12 @@
@lru_cache(maxsize=4096)
def char_width(char: str) -> int:
+ """Return the width of a single character as it would be displayed in a
+ terminal or editor (which respects Unicode East Asian Width).
+
+ Full width characters are counted as 2, while half width characters are
+ counted as 1. Also control characters are counted as 0.
+ """
table = WIDTH_TABLE
codepoint = ord(char)
highest = len(table) - 1
@@ -311,6 +365,12 @@
def str_width(line_str: str) -> int:
+ """Return the width of `line_str` as it would be displayed in a terminal
+ or editor (which respects Unicode East Asian Width).
+
+ You could utilize this function to determine, for example, if a string
+ is too wide to display in a terminal or editor.
+ """
if line_str.isascii():
# Fast path for a line consisting of only ASCII characters
return len(line_str)
@@ -318,10 +378,14 @@
def count_chars_in_width(line_str: str, max_width: int) -> int:
+ """Count the number of characters in `line_str` that would fit in a
+ terminal or editor of `max_width` (which respects Unicode East Asian
+ Width).
+ """
total_width = 0
for i, char in enumerate(line_str):
width = char_width(char)
if width + total_width > max_width:
return i
total_width += width
- return len(line_str)+ return len(line_str)
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/strings.py |
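The char_width/str_width docstrings added in the diff above describe counting display columns with respect to Unicode East Asian Width: full-width characters count as 2, half-width as 1, and control characters as 0. Black backs this with a precomputed WIDTH_TABLE; the sketch below is only an illustration of the same rule using the standard-library unicodedata module instead of that table, so the exact classification is an assumption, not Black's implementation.

import unicodedata

def char_width(char: str) -> int:
    # Control characters occupy no columns.
    if unicodedata.category(char) == "Cc":
        return 0
    # Wide ("W") and Fullwidth ("F") characters occupy two columns.
    return 2 if unicodedata.east_asian_width(char) in ("W", "F") else 1

def str_width(line_str: str) -> int:
    # Fast path mirrors the ASCII shortcut shown in the diff above.
    if line_str.isascii():
        return len(line_str)
    return sum(char_width(char) for char in line_str)

assert str_width("width") == 5
assert str_width("ニッポン") == 8  # four full-width katakana characters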
Add detailed docstrings explaining each function | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Python imports
import os
import pickle
import tempfile
from typing import Any, Optional, TypeVar, Union
# Local imports
from . import token
_P = TypeVar("_P", bound="Grammar")
Label = tuple[int, Optional[str]]
DFA = list[list[tuple[int, int]]]
DFAS = tuple[DFA, dict[int, int]]
Path = Union[str, "os.PathLike[str]"]
class Grammar:
def __init__(self) -> None:
self.symbol2number: dict[str, int] = {}
self.number2symbol: dict[int, str] = {}
self.states: list[DFA] = []
self.dfas: dict[int, DFAS] = {}
self.labels: list[Label] = [(0, "EMPTY")]
self.keywords: dict[str, int] = {}
self.soft_keywords: dict[str, int] = {}
self.tokens: dict[int, int] = {}
self.symbol2label: dict[str, int] = {}
self.version: tuple[int, int] = (0, 0)
self.start = 256
# Python 3.7+ parses async as a keyword, not an identifier
self.async_keywords = False
def dump(self, filename: Path) -> None:
# mypyc generates objects that don't have a __dict__, but they
# do have __getstate__ methods that will return an equivalent
# dictionary
if hasattr(self, "__dict__"):
d = self.__dict__
else:
d = self.__getstate__() # type: ignore
with tempfile.NamedTemporaryFile(
dir=os.path.dirname(filename), delete=False
) as f:
pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)
os.replace(f.name, filename)
def _update(self, attrs: dict[str, Any]) -> None:
for k, v in attrs.items():
setattr(self, k, v)
def load(self, filename: Path) -> None:
with open(filename, "rb") as f:
d = pickle.load(f)
self._update(d)
def loads(self, pkl: bytes) -> None:
self._update(pickle.loads(pkl))
def copy(self: _P) -> _P:
new = self.__class__()
for dict_attr in (
"symbol2number",
"number2symbol",
"dfas",
"keywords",
"soft_keywords",
"tokens",
"symbol2label",
):
setattr(new, dict_attr, getattr(self, dict_attr).copy())
new.labels = self.labels[:]
new.states = self.states[:]
new.start = self.start
new.version = self.version
new.async_keywords = self.async_keywords
return new
def report(self) -> None:
from pprint import pprint
print("s2n")
pprint(self.symbol2number)
print("n2s")
pprint(self.number2symbol)
print("states")
pprint(self.states)
print("dfas")
pprint(self.dfas)
print("labels")
pprint(self.labels)
print("start", self.start)
# Map from operator to number (since tokenize doesn't do this)
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
@= ATEQUAL
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
:= COLONEQUAL
! BANG
"""
opmap = {}
for line in opmap_raw.splitlines():
if line:
op, name = line.split()
opmap[op] = getattr(token, name) | --- +++ @@ -1,6 +1,16 @@ # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
+"""This module defines the data structures used to represent a grammar.
+
+These are a bit arcane because they are derived from the data
+structures used by Python's 'pgen' parser generator.
+
+There's also a table here mapping operators to their names in the
+token module; the Python tokenize module reports all operators as the
+fallback token code OP, but the parser needs the actual token code.
+
+"""
# Python imports
import os
@@ -19,6 +29,58 @@
class Grammar:
+ """Pgen parsing tables conversion class.
+
+ Once initialized, this class supplies the grammar tables for the
+ parsing engine implemented by parse.py. The parsing engine
+ accesses the instance variables directly. The class here does not
+ provide initialization of the tables; several subclasses exist to
+ do this (see the conv and pgen modules).
+
+ The load() method reads the tables from a pickle file, which is
+ much faster than the other ways offered by subclasses. The pickle
+ file is written by calling dump() (after loading the grammar
+ tables using a subclass). The report() method prints a readable
+ representation of the tables to stdout, for debugging.
+
+ The instance variables are as follows:
+
+ symbol2number -- a dict mapping symbol names to numbers. Symbol
+ numbers are always 256 or higher, to distinguish
+ them from token numbers, which are between 0 and
+ 255 (inclusive).
+
+ number2symbol -- a dict mapping numbers to symbol names;
+ these two are each other's inverse.
+
+ states -- a list of DFAs, where each DFA is a list of
+ states, each state is a list of arcs, and each
+ arc is a (i, j) pair where i is a label and j is
+ a state number. The DFA number is the index into
+ this list. (This name is slightly confusing.)
+ Final states are represented by a special arc of
+ the form (0, j) where j is its own state number.
+
+ dfas -- a dict mapping symbol numbers to (DFA, first)
+ pairs, where DFA is an item from the states list
+ above, and first is a set of tokens that can
+ begin this grammar rule (represented by a dict
+ whose values are always 1).
+
+ labels -- a list of (x, y) pairs where x is either a token
+ number or a symbol number, and y is either None
+ or a string; the strings are keywords. The label
+ number is the index in this list; label numbers
+ are used to mark state transitions (arcs) in the
+ DFAs.
+
+ start -- the number of the grammar's start symbol.
+
+ keywords -- a dict mapping keyword strings to arc labels.
+
+ tokens -- a dict mapping token numbers to arc labels.
+
+ """
def __init__(self) -> None:
self.symbol2number: dict[str, int] = {}
@@ -36,6 +98,7 @@ self.async_keywords = False
def dump(self, filename: Path) -> None:
+ """Dump the grammar tables to a pickle file."""
# mypyc generates objects that don't have a __dict__, but they
# do have __getstate__ methods that will return an equivalent
@@ -56,14 +119,19 @@ setattr(self, k, v)
def load(self, filename: Path) -> None:
+ """Load the grammar tables from a pickle file."""
with open(filename, "rb") as f:
d = pickle.load(f)
self._update(d)
def loads(self, pkl: bytes) -> None:
+ """Load the grammar tables from a pickle bytes object."""
self._update(pickle.loads(pkl))
def copy(self: _P) -> _P:
+ """
+ Copy the grammar.
+ """
new = self.__class__()
for dict_attr in (
"symbol2number",
@@ -83,6 +151,7 @@ return new
def report(self) -> None:
+ """Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print("s2n")
@@ -156,4 +225,4 @@ for line in opmap_raw.splitlines():
if line:
op, name = line.split()
- opmap[op] = getattr(token, name)+ opmap[op] = getattr(token, name)
| https://raw.githubusercontent.com/psf/black/HEAD/src/blib2to3/pgen2/grammar.py |
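Grammar.dump() in the code above writes the pickled tables to a NamedTemporaryFile in the target directory and then swaps it into place with os.replace(), so readers never see a half-written file. Below is a minimal stand-alone sketch of that atomic-write pattern; the helper names and the example payload are hypothetical and not part of blib2to3.

import os
import pickle
import tempfile

def atomic_pickle_dump(obj: object, filename: str) -> None:
    # Write to a temp file in the same directory, then atomically replace the
    # target, mirroring the approach used by Grammar.dump() above.
    with tempfile.NamedTemporaryFile(
        dir=os.path.dirname(filename) or ".", delete=False
    ) as f:
        pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
    os.replace(f.name, filename)

def pickle_load(filename: str) -> object:
    with open(filename, "rb") as f:
        return pickle.load(f)

atomic_pickle_dump({"symbol2number": {"funcdef": 263}}, "grammar.pickle")
assert pickle_load("grammar.pickle")["symbol2number"]["funcdef"] == 263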
Add detailed docstrings explaining each function |
import json
import re
import tempfile
from typing import Any
from click import echo, style
from mypy_extensions import mypyc_attr
@mypyc_attr(patchable=True)
def _out(message: str | None = None, nl: bool = True, **styles: Any) -> None:
if message is not None:
if "bold" not in styles:
styles["bold"] = True
message = style(message, **styles)
echo(message, nl=nl, err=True)
@mypyc_attr(patchable=True)
def _err(message: str | None = None, nl: bool = True, **styles: Any) -> None:
if message is not None:
if "fg" not in styles:
styles["fg"] = "red"
message = style(message, **styles)
echo(message, nl=nl, err=True)
@mypyc_attr(patchable=True)
def out(message: str | None = None, nl: bool = True, **styles: Any) -> None:
_out(message, nl=nl, **styles)
def err(message: str | None = None, nl: bool = True, **styles: Any) -> None:
_err(message, nl=nl, **styles)
def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str:
a_nb = json.loads(a)
b_nb = json.loads(b)
diff_lines = [
diff(
"".join(a_nb["cells"][cell_number]["source"]) + "\n",
"".join(b_nb["cells"][cell_number]["source"]) + "\n",
f"{a_name}:cell_{cell_number}",
f"{b_name}:cell_{cell_number}",
)
for cell_number, cell in enumerate(a_nb["cells"])
if cell["cell_type"] == "code"
]
return "".join(diff_lines)
_line_pattern = re.compile(r"(.*?(?:\r\n|\n|\r|$))")
def _splitlines_no_ff(source: str) -> list[str]:
result = [match[0] for match in _line_pattern.finditer(source)]
if result[-1] == "":
result.pop(-1)
return result
def diff(a: str, b: str, a_name: str, b_name: str) -> str:
import difflib
a_lines = _splitlines_no_ff(a)
b_lines = _splitlines_no_ff(b)
diff_lines = []
for line in difflib.unified_diff(
a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5
):
# Work around https://bugs.python.org/issue2142
# See:
# https://www.gnu.org/software/diffutils/manual/html_node/Incomplete-Lines.html
if line[-1] == "\n":
diff_lines.append(line)
else:
diff_lines.append(line + "\n")
diff_lines.append("\\ No newline at end of file\n")
return "".join(diff_lines)
def color_diff(contents: str) -> str:
lines = contents.split("\n")
for i, line in enumerate(lines):
if line.startswith("+++") or line.startswith("---"):
line = "\033[1m" + line + "\033[0m" # bold, reset
elif line.startswith("@@"):
line = "\033[36m" + line + "\033[0m" # cyan, reset
elif line.startswith("+"):
line = "\033[32m" + line + "\033[0m" # green, reset
elif line.startswith("-"):
line = "\033[31m" + line + "\033[0m" # red, reset
lines[i] = line
return "\n".join(lines)
@mypyc_attr(patchable=True)
def dump_to_file(*output: str, ensure_final_newline: bool = True) -> str:
with tempfile.NamedTemporaryFile(
mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
) as f:
for lines in output:
f.write(lines)
if ensure_final_newline and lines and lines[-1] != "\n":
f.write("\n")
return f.name | --- +++ @@ -1,3 +1,7 @@+"""Nice output for Black.
+
+The double calls are for patching purposes in tests.
+"""
import json
import re
@@ -36,6 +40,7 @@
def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str:
+ """Return a unified diff string between each cell in notebooks `a` and `b`."""
a_nb = json.loads(a)
b_nb = json.loads(b)
diff_lines = [
@@ -55,6 +60,12 @@
def _splitlines_no_ff(source: str) -> list[str]:
+ """Split a string into lines ignoring form feed and other chars.
+
+ This mimics how the Python parser splits source code.
+
+ A simplified version of the function with the same name in Lib/ast.py
+ """
result = [match[0] for match in _line_pattern.finditer(source)]
if result[-1] == "":
result.pop(-1)
@@ -62,6 +73,7 @@
def diff(a: str, b: str, a_name: str, b_name: str) -> str:
+ """Return a unified diff string between strings `a` and `b`."""
import difflib
a_lines = _splitlines_no_ff(a)
@@ -82,6 +94,7 @@
def color_diff(contents: str) -> str:
+ """Inject the ANSI color codes to the diff."""
lines = contents.split("\n")
for i, line in enumerate(lines):
if line.startswith("+++") or line.startswith("---"):
@@ -98,6 +111,7 @@
@mypyc_attr(patchable=True)
def dump_to_file(*output: str, ensure_final_newline: bool = True) -> str:
+ """Dump `output` to a temporary file. Return path to the file."""
with tempfile.NamedTemporaryFile(
mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
) as f:
@@ -105,4 +119,4 @@ f.write(lines)
if ensure_final_newline and lines and lines[-1] != "\n":
f.write("\n")
- return f.name+ return f.name
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/output.py |
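The diff() helper documented above wraps difflib.unified_diff with five lines of context plus a workaround for inputs whose last line has no trailing newline (the linked bugs.python.org/issue2142). A short usage sketch of the underlying standard-library call, with made-up file names and contents, shows what the generated output looks like.

import difflib

before = 'print("hello")\nprint("world")\n'
after = 'print("hello, world")\n'

diff_lines = difflib.unified_diff(
    before.splitlines(keepends=True),
    after.splitlines(keepends=True),
    fromfile="before.py",
    tofile="after.py",
    n=5,
)
print("".join(diff_lines))
# --- before.py
# +++ after.py
# @@ -1,2 +1 @@
# -print("hello")
# -print("world")
# +print("hello, world")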
Add docstrings to improve readability | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Python imports
import os
from typing import Union
# Local imports
from .pgen2 import driver
from .pgen2.grammar import Grammar
# Moved into initialize because mypyc can't handle __file__ (XXX bug)
# # The grammar file
# _GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
# _PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
# "PatternGrammar.txt")
class Symbols:
def __init__(self, grammar: Grammar) -> None:
for name, symbol in grammar.symbol2number.items():
setattr(self, name, symbol)
class _python_symbols(Symbols):
and_expr: int
and_test: int
annassign: int
arglist: int
argument: int
arith_expr: int
asexpr_test: int
assert_stmt: int
async_funcdef: int
async_stmt: int
atom: int
augassign: int
break_stmt: int
case_block: int
classdef: int
comp_for: int
comp_if: int
comp_iter: int
comp_op: int
comparison: int
compound_stmt: int
continue_stmt: int
decorated: int
decorator: int
decorators: int
del_stmt: int
dictsetmaker: int
dotted_as_name: int
dotted_as_names: int
dotted_name: int
encoding_decl: int
eval_input: int
except_clause: int
expr: int
expr_stmt: int
exprlist: int
factor: int
file_input: int
flow_stmt: int
for_stmt: int
fstring: int
fstring_format_spec: int
fstring_middle: int
fstring_replacement_field: int
funcdef: int
global_stmt: int
guard: int
if_stmt: int
import_as_name: int
import_as_names: int
import_from: int
import_name: int
import_stmt: int
lambdef: int
listmaker: int
match_stmt: int
namedexpr_test: int
not_test: int
old_comp_for: int
old_comp_if: int
old_comp_iter: int
old_lambdef: int
old_test: int
or_test: int
parameters: int
paramspec: int
pass_stmt: int
pattern: int
patterns: int
power: int
raise_stmt: int
return_stmt: int
shift_expr: int
simple_stmt: int
single_input: int
sliceop: int
small_stmt: int
subject_expr: int
star_expr: int
stmt: int
subscript: int
subscriptlist: int
suite: int
term: int
test: int
testlist: int
testlist1: int
testlist_gexp: int
testlist_safe: int
testlist_star_expr: int
tfpdef: int
tfplist: int
tname: int
tname_star: int
trailer: int
try_stmt: int
tstring: int
tstring_format_spec: int
tstring_middle: int
tstring_replacement_field: int
type_stmt: int
typedargslist: int
typeparam: int
typeparams: int
typevar: int
typevartuple: int
varargslist: int
vfpdef: int
vfplist: int
vname: int
while_stmt: int
with_stmt: int
xor_expr: int
yield_arg: int
yield_expr: int
yield_stmt: int
class _pattern_symbols(Symbols):
Alternative: int
Alternatives: int
Details: int
Matcher: int
NegatedUnit: int
Repeater: int
Unit: int
python_grammar: Grammar
python_grammar_async_keywords: Grammar
python_grammar_soft_keywords: Grammar
pattern_grammar: Grammar
python_symbols: _python_symbols
pattern_symbols: _pattern_symbols
def initialize(cache_dir: Union[str, "os.PathLike[str]", None] = None) -> None:
global python_grammar
global python_grammar_async_keywords
global python_grammar_soft_keywords
global python_symbols
global pattern_grammar
global pattern_symbols
# The grammar file
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
_PATTERN_GRAMMAR_FILE = os.path.join(
os.path.dirname(__file__), "PatternGrammar.txt"
)
python_grammar = driver.load_packaged_grammar("blib2to3", _GRAMMAR_FILE, cache_dir)
assert "print" not in python_grammar.keywords
assert "exec" not in python_grammar.keywords
soft_keywords = python_grammar.soft_keywords.copy()
python_grammar.soft_keywords.clear()
python_symbols = _python_symbols(python_grammar)
# Python 3.0-3.6
python_grammar.version = (3, 0)
# Python 3.7+
python_grammar_async_keywords = python_grammar.copy()
python_grammar_async_keywords.async_keywords = True
python_grammar_async_keywords.version = (3, 7)
# Python 3.10+
python_grammar_soft_keywords = python_grammar_async_keywords.copy()
python_grammar_soft_keywords.soft_keywords = soft_keywords
python_grammar_soft_keywords.version = (3, 10)
pattern_grammar = driver.load_packaged_grammar(
"blib2to3", _PATTERN_GRAMMAR_FILE, cache_dir
)
pattern_symbols = _pattern_symbols(pattern_grammar) | --- +++ @@ -1,6 +1,7 @@ # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
+"""Export the Python grammar and symbols."""
# Python imports
import os
@@ -19,6 +20,11 @@
class Symbols:
def __init__(self, grammar: Grammar) -> None:
+ """Initializer.
+
+ Creates an attribute for each grammar symbol (nonterminal),
+ whose value is the symbol's type (an int >= 256).
+ """
for name, symbol in grammar.symbol2number.items():
setattr(self, name, symbol)
@@ -199,4 +205,4 @@ pattern_grammar = driver.load_packaged_grammar(
"blib2to3", _PATTERN_GRAMMAR_FILE, cache_dir
)
- pattern_symbols = _pattern_symbols(pattern_grammar)+ pattern_symbols = _pattern_symbols(pattern_grammar)
| https://raw.githubusercontent.com/psf/black/HEAD/src/blib2to3/pygram.py |
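The Symbols initializer documented above simply turns every entry of grammar.symbol2number into an attribute, so callers can write syms.funcdef instead of looking numbers up in a dict. A tiny self-contained sketch of that pattern follows; the symbol names and numbers here are made up for illustration.

class Symbols:
    def __init__(self, symbol2number: dict) -> None:
        # One attribute per grammar symbol, valued with its number (>= 256).
        for name, number in symbol2number.items():
            setattr(self, name, number)

syms = Symbols({"funcdef": 263, "classdef": 329})
assert syms.funcdef == 263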
Write beginner-friendly docstrings |
from dataclasses import dataclass, field
from enum import Enum, auto
from hashlib import sha256
from operator import attrgetter
from typing import Final
from black.const import DEFAULT_LINE_LENGTH
class TargetVersion(Enum):
PY33 = 3
PY34 = 4
PY35 = 5
PY36 = 6
PY37 = 7
PY38 = 8
PY39 = 9
PY310 = 10
PY311 = 11
PY312 = 12
PY313 = 13
PY314 = 14
def pretty(self) -> str:
assert self.name[:2] == "PY"
return f"Python {self.name[2]}.{self.name[3:]}"
class Feature(Enum):
F_STRINGS = 2
NUMERIC_UNDERSCORES = 3
TRAILING_COMMA_IN_CALL = 4
TRAILING_COMMA_IN_DEF = 5
# The following two feature-flags are mutually exclusive, and exactly one should be
# set for every version of python.
ASYNC_IDENTIFIERS = 6
ASYNC_KEYWORDS = 7
ASSIGNMENT_EXPRESSIONS = 8
POS_ONLY_ARGUMENTS = 9
RELAXED_DECORATORS = 10
PATTERN_MATCHING = 11
UNPACKING_ON_FLOW = 12
ANN_ASSIGN_EXTENDED_RHS = 13
EXCEPT_STAR = 14
VARIADIC_GENERICS = 15
DEBUG_F_STRINGS = 16
PARENTHESIZED_CONTEXT_MANAGERS = 17
TYPE_PARAMS = 18
# FSTRING_PARSING = 19 # unused
TYPE_PARAM_DEFAULTS = 20
UNPARENTHESIZED_EXCEPT_TYPES = 21
T_STRINGS = 22
FORCE_OPTIONAL_PARENTHESES = 50
# __future__ flags
FUTURE_ANNOTATIONS = 51
FUTURE_FLAG_TO_FEATURE: Final = {
"annotations": Feature.FUTURE_ANNOTATIONS,
}
VERSION_TO_FEATURES: dict[TargetVersion, set[Feature]] = {
TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS},
TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS},
TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS},
TargetVersion.PY36: {
Feature.F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_IDENTIFIERS,
},
TargetVersion.PY37: {
Feature.F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
},
TargetVersion.PY38: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
},
TargetVersion.PY39: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.RELAXED_DECORATORS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
},
TargetVersion.PY310: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.RELAXED_DECORATORS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.PATTERN_MATCHING,
},
TargetVersion.PY311: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.RELAXED_DECORATORS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.PATTERN_MATCHING,
Feature.EXCEPT_STAR,
Feature.VARIADIC_GENERICS,
},
TargetVersion.PY312: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.RELAXED_DECORATORS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.PATTERN_MATCHING,
Feature.EXCEPT_STAR,
Feature.VARIADIC_GENERICS,
Feature.TYPE_PARAMS,
},
TargetVersion.PY313: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.RELAXED_DECORATORS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.PATTERN_MATCHING,
Feature.EXCEPT_STAR,
Feature.VARIADIC_GENERICS,
Feature.TYPE_PARAMS,
Feature.TYPE_PARAM_DEFAULTS,
},
TargetVersion.PY314: {
Feature.F_STRINGS,
Feature.DEBUG_F_STRINGS,
Feature.NUMERIC_UNDERSCORES,
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
Feature.ASYNC_KEYWORDS,
Feature.FUTURE_ANNOTATIONS,
Feature.ASSIGNMENT_EXPRESSIONS,
Feature.RELAXED_DECORATORS,
Feature.POS_ONLY_ARGUMENTS,
Feature.UNPACKING_ON_FLOW,
Feature.ANN_ASSIGN_EXTENDED_RHS,
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.PATTERN_MATCHING,
Feature.EXCEPT_STAR,
Feature.VARIADIC_GENERICS,
Feature.TYPE_PARAMS,
Feature.TYPE_PARAM_DEFAULTS,
Feature.UNPARENTHESIZED_EXCEPT_TYPES,
Feature.T_STRINGS,
},
}
def supports_feature(target_versions: set[TargetVersion], feature: Feature) -> bool:
if not target_versions:
raise ValueError("At least one target Python version must be specified.")
return all(feature in VERSION_TO_FEATURES[version] for version in target_versions)
class Preview(Enum):
# NOTE: string_processing requires wrap_long_dict_values_in_parens
# for https://github.com/psf/black/issues/3117 to be fixed.
string_processing = auto()
hug_parens_with_braces_and_square_brackets = auto()
wrap_comprehension_in = auto()
simplify_power_operator_hugging = auto()
wrap_long_dict_values_in_parens = auto()
fix_if_guard_explosion_in_case_statement = auto()
UNSTABLE_FEATURES: set[Preview] = {
# Many issues, see summary in https://github.com/psf/black/issues/4208
Preview.string_processing,
# See issue #4036 (crash), #4098, #4099 (proposed tweaks)
Preview.hug_parens_with_braces_and_square_brackets,
}
_MAX_CACHE_KEY_PART_LENGTH: Final = 32
@dataclass
class Mode:
target_versions: set[TargetVersion] = field(default_factory=set)
line_length: int = DEFAULT_LINE_LENGTH
string_normalization: bool = True
is_pyi: bool = False
is_ipynb: bool = False
skip_source_first_line: bool = False
magic_trailing_comma: bool = True
python_cell_magics: set[str] = field(default_factory=set)
preview: bool = False
unstable: bool = False
enabled_features: set[Preview] = field(default_factory=set)
def __contains__(self, feature: Preview) -> bool:
if self.unstable:
return True
if feature in self.enabled_features:
return True
return self.preview and feature not in UNSTABLE_FEATURES
def get_cache_key(self) -> str:
if self.target_versions:
version_str = ",".join(
str(version.value)
for version in sorted(self.target_versions, key=attrgetter("value"))
)
else:
version_str = "-"
if len(version_str) > _MAX_CACHE_KEY_PART_LENGTH:
version_str = sha256(version_str.encode()).hexdigest()[
:_MAX_CACHE_KEY_PART_LENGTH
]
features_and_magics = (
",".join(sorted(f.name for f in self.enabled_features))
+ "@"
+ ",".join(sorted(self.python_cell_magics))
)
features_and_magics = sha256(features_and_magics.encode()).hexdigest()[
:_MAX_CACHE_KEY_PART_LENGTH
]
parts = [
version_str,
str(self.line_length),
str(int(self.string_normalization)),
str(int(self.is_pyi)),
str(int(self.is_ipynb)),
str(int(self.skip_source_first_line)),
str(int(self.magic_trailing_comma)),
str(int(self.preview)),
str(int(self.unstable)),
features_and_magics,
]
return ".".join(parts)
def __hash__(self) -> int:
return hash((
frozenset(self.target_versions),
self.line_length,
self.string_normalization,
self.is_pyi,
self.is_ipynb,
self.skip_source_first_line,
self.magic_trailing_comma,
frozenset(self.python_cell_magics),
self.preview,
self.unstable,
frozenset(self.enabled_features),
)) | --- +++ @@ -1,3 +1,8 @@+"""Data structures configuring Black behavior.
+
+Mostly around Python language feature support per version and Black configuration
+chosen by the user.
+"""
from dataclasses import dataclass, field
from enum import Enum, auto
@@ -215,6 +220,7 @@
class Preview(Enum):
+ """Individual preview style features."""
# NOTE: string_processing requires wrap_long_dict_values_in_parens
# for https://github.com/psf/black/issues/3117 to be fixed.
@@ -252,6 +258,13 @@ enabled_features: set[Preview] = field(default_factory=set)
def __contains__(self, feature: Preview) -> bool:
+ """
+ Provide `Preview.FEATURE in Mode` syntax that mirrors the ``preview`` flag.
+
+ In unstable mode, all features are enabled. In preview mode, all features
+ except those in UNSTABLE_FEATURES are enabled. Any features in
+ `self.enabled_features` are also enabled.
+ """
if self.unstable:
return True
if feature in self.enabled_features:
@@ -305,4 +318,4 @@ self.preview,
self.unstable,
frozenset(self.enabled_features),
- ))+ ))
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/mode.py |
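The __contains__ docstring added above spells out the rule: everything is enabled in unstable mode, preview mode enables everything except UNSTABLE_FEATURES, and enabled_features switches individual ones on. The following simplified sketch (a cut-down stand-in, not the full Mode class) makes the three cases easy to check.

from dataclasses import dataclass, field
from enum import Enum, auto

class Preview(Enum):
    string_processing = auto()
    wrap_comprehension_in = auto()

UNSTABLE_FEATURES = {Preview.string_processing}

@dataclass
class Mode:
    preview: bool = False
    unstable: bool = False
    enabled_features: set = field(default_factory=set)

    def __contains__(self, feature: Preview) -> bool:
        if self.unstable:
            return True
        if feature in self.enabled_features:
            return True
        return self.preview and feature not in UNSTABLE_FEATURES

assert Preview.wrap_comprehension_in in Mode(preview=True)   # stable preview feature
assert Preview.string_processing not in Mode(preview=True)   # unstable feature stays off
assert Preview.string_processing in Mode(unstable=True)      # unstable mode enables all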
Add detailed docstrings explaining each function |
import difflib
from collections.abc import Collection, Iterator, Sequence
from dataclasses import dataclass
from black.nodes import (
LN,
STANDALONE_COMMENT,
Leaf,
Node,
Visitor,
first_leaf,
furthest_ancestor_with_last_leaf,
last_leaf,
syms,
)
from blib2to3.pgen2.token import ASYNC, NEWLINE
def parse_line_ranges(line_ranges: Sequence[str]) -> list[tuple[int, int]]:
lines: list[tuple[int, int]] = []
for lines_str in line_ranges:
parts = lines_str.split("-")
if len(parts) != 2:
raise ValueError(
"Incorrect --line-ranges format, expect 'START-END', found"
f" {lines_str!r}"
)
try:
start = int(parts[0])
end = int(parts[1])
except ValueError:
raise ValueError(
"Incorrect --line-ranges value, expect integer ranges, found"
f" {lines_str!r}"
) from None
else:
lines.append((start, end))
return lines
def is_valid_line_range(lines: tuple[int, int]) -> bool:
return not lines or lines[0] <= lines[1]
def sanitized_lines(
lines: Collection[tuple[int, int]], src_contents: str
) -> Collection[tuple[int, int]]:
if not src_contents:
return []
good_lines = []
src_line_count = src_contents.count("\n")
if not src_contents.endswith("\n"):
src_line_count += 1
for start, end in lines:
if start > src_line_count:
continue
# line-ranges are 1-based
start = max(start, 1)
if end < start:
continue
end = min(end, src_line_count)
good_lines.append((start, end))
return good_lines
def adjusted_lines(
lines: Collection[tuple[int, int]],
original_source: str,
modified_source: str,
) -> list[tuple[int, int]]:
lines_mappings = _calculate_lines_mappings(original_source, modified_source)
new_lines = []
# Keep an index of the current search. Since the lines and lines_mappings are
# sorted, this makes the search complexity linear.
current_mapping_index = 0
for start, end in sorted(lines):
start_mapping_index = _find_lines_mapping_index(
start,
lines_mappings,
current_mapping_index,
)
end_mapping_index = _find_lines_mapping_index(
end,
lines_mappings,
start_mapping_index,
)
current_mapping_index = start_mapping_index
if start_mapping_index >= len(lines_mappings) or end_mapping_index >= len(
lines_mappings
):
# Protect against invalid inputs.
continue
start_mapping = lines_mappings[start_mapping_index]
end_mapping = lines_mappings[end_mapping_index]
if start_mapping.is_changed_block:
# When the line falls into a changed block, expands to the whole block.
new_start = start_mapping.modified_start
else:
new_start = (
start - start_mapping.original_start + start_mapping.modified_start
)
if end_mapping.is_changed_block:
# When the line falls into a changed block, expands to the whole block.
new_end = end_mapping.modified_end
else:
new_end = end - end_mapping.original_start + end_mapping.modified_start
new_range = (new_start, new_end)
if is_valid_line_range(new_range):
new_lines.append(new_range)
return new_lines
def convert_unchanged_lines(src_node: Node, lines: Collection[tuple[int, int]]) -> None:
lines_set: set[int] = set()
for start, end in lines:
lines_set.update(range(start, end + 1))
visitor = _TopLevelStatementsVisitor(lines_set)
_ = list(visitor.visit(src_node)) # Consume all results.
_convert_unchanged_line_by_line(src_node, lines_set)
def _contains_standalone_comment(node: LN) -> bool:
if isinstance(node, Leaf):
return node.type == STANDALONE_COMMENT
else:
for child in node.children:
if _contains_standalone_comment(child):
return True
return False
class _TopLevelStatementsVisitor(Visitor[None]):
def __init__(self, lines_set: set[int]):
self._lines_set = lines_set
def visit_simple_stmt(self, node: Node) -> Iterator[None]:
# This is only called for top-level statements, since `visit_suite`
# won't visit its children nodes.
yield from []
newline_leaf = last_leaf(node)
if not newline_leaf:
return
assert (
newline_leaf.type == NEWLINE
), f"Unexpectedly found leaf.type={newline_leaf.type}"
# We need to find the furthest ancestor with the NEWLINE as the last
# leaf, since a `suite` can simply be a `simple_stmt` when it puts
# its body on the same line. Example: `if cond: pass`.
ancestor = furthest_ancestor_with_last_leaf(newline_leaf)
if not _get_line_range(ancestor).intersection(self._lines_set):
_convert_node_to_standalone_comment(ancestor)
def visit_suite(self, node: Node) -> Iterator[None]:
yield from []
# If there is a STANDALONE_COMMENT node, it means parts of the node tree
# have fmt on/off/skip markers. Those STANDALONE_COMMENT nodes can't
# be simply converted by calling str(node). So we just don't convert
# here.
if _contains_standalone_comment(node):
return
# Find the semantic parent of this suite. For `async_stmt` and
# `async_funcdef`, the ASYNC token is defined on a separate level by the
# grammar.
semantic_parent = node.parent
if semantic_parent is not None:
if (
semantic_parent.prev_sibling is not None
and semantic_parent.prev_sibling.type == ASYNC
):
semantic_parent = semantic_parent.parent
if semantic_parent is not None and not _get_line_range(
semantic_parent
).intersection(self._lines_set):
_convert_node_to_standalone_comment(semantic_parent)
def _convert_unchanged_line_by_line(node: Node, lines_set: set[int]) -> None:
for leaf in node.leaves():
if leaf.type != NEWLINE:
# We only consider "unwrapped lines", which are divided by the NEWLINE
# token.
continue
if leaf.parent and leaf.parent.type == syms.match_stmt:
# The `suite` node is defined as:
# match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT
# Here we need to check `subject_expr`. The `case_block+` will be
# checked by their own NEWLINEs.
nodes_to_ignore: list[LN] = []
prev_sibling = leaf.prev_sibling
while prev_sibling:
nodes_to_ignore.insert(0, prev_sibling)
prev_sibling = prev_sibling.prev_sibling
if not _get_line_range(nodes_to_ignore).intersection(lines_set):
_convert_nodes_to_standalone_comment(nodes_to_ignore, newline=leaf)
elif leaf.parent and leaf.parent.type == syms.suite:
# The `suite` node is defined as:
# suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
# We will check `simple_stmt` and `stmt+` separately against the lines set
parent_sibling = leaf.parent.prev_sibling
nodes_to_ignore = []
while parent_sibling and parent_sibling.type != syms.suite:
# NOTE: Multiple suite nodes can exist as siblings in e.g. `if_stmt`.
nodes_to_ignore.insert(0, parent_sibling)
parent_sibling = parent_sibling.prev_sibling
# Special case for `async_stmt` and `async_funcdef` where the ASYNC
# token is on the grandparent node.
grandparent = leaf.parent.parent
if (
grandparent is not None
and grandparent.prev_sibling is not None
and grandparent.prev_sibling.type == ASYNC
):
nodes_to_ignore.insert(0, grandparent.prev_sibling)
if not _get_line_range(nodes_to_ignore).intersection(lines_set):
_convert_nodes_to_standalone_comment(nodes_to_ignore, newline=leaf)
else:
ancestor = furthest_ancestor_with_last_leaf(leaf)
# Consider multiple decorators as a whole block, as their
# newlines have different behaviors than the rest of the grammar.
if (
ancestor.type == syms.decorator
and ancestor.parent
and ancestor.parent.type == syms.decorators
):
ancestor = ancestor.parent
if not _get_line_range(ancestor).intersection(lines_set):
_convert_node_to_standalone_comment(ancestor)
def _convert_node_to_standalone_comment(node: LN) -> None:
parent = node.parent
if not parent:
return
first = first_leaf(node)
last = last_leaf(node)
if not first or not last:
return
if first is last:
# This can happen on the following edge cases:
# 1. A block of `# fmt: off/on` code except the `# fmt: on` is placed
# on the end of the last line instead of on a new line.
# 2. A single backslash on its own line followed by a comment line.
# Ideally we don't want to format them when not requested, but fixing
# isn't easy. These cases are also badly formatted code, so it isn't
# too bad we reformat them.
return
# The prefix contains comments and indentation whitespaces. They are
# reformatted accordingly to the correct indentation level.
# This also means the indentation will be changed on the unchanged lines, and
# this is actually required to not break incremental reformatting.
prefix = first.prefix
first.prefix = ""
index = node.remove()
if index is not None:
# Because of the special handling of multiple decorators, if the decorated
# item is a single line then there will be a missing newline between the
# decorator and item, so add it back. This doesn't affect any other case
# since a decorated item with a newline would hit the earlier suite case
# in _convert_unchanged_line_by_line that correctly handles the newlines.
if node.type == syms.decorated:
# A leaf of type decorated wouldn't make sense, since it should always
# have at least the decorator + the decorated item, so if this assert
# hits that means there's a problem in the parser.
assert isinstance(node, Node)
# 1 will always be the correct index since before this function is
# called all the decorators are collapsed into a single leaf
node.insert_child(1, Leaf(NEWLINE, "\n"))
# Remove the '\n', as STANDALONE_COMMENT will have '\n' appended when
# generating the formatted code.
value = str(node)[:-1]
parent.insert_child(
index,
Leaf(
STANDALONE_COMMENT,
value,
prefix=prefix,
fmt_pass_converted_first_leaf=first,
),
)
def _convert_nodes_to_standalone_comment(nodes: Sequence[LN], *, newline: Leaf) -> None:
if not nodes:
return
parent = nodes[0].parent
first = first_leaf(nodes[0])
if not parent or not first:
return
prefix = first.prefix
first.prefix = ""
value = "".join(str(node) for node in nodes)
# The prefix comment on the NEWLINE leaf is the trailing comment of the statement.
if newline.prefix:
value += newline.prefix
newline.prefix = ""
index = nodes[0].remove()
for node in nodes[1:]:
node.remove()
if index is not None:
parent.insert_child(
index,
Leaf(
STANDALONE_COMMENT,
value,
prefix=prefix,
fmt_pass_converted_first_leaf=first,
),
)
def _leaf_line_end(leaf: Leaf) -> int:
if leaf.type == NEWLINE:
return leaf.lineno
else:
# Leaf nodes like multiline strings can occupy multiple lines.
return leaf.lineno + str(leaf).count("\n")
def _get_line_range(node_or_nodes: LN | list[LN]) -> set[int]:
if isinstance(node_or_nodes, list):
nodes = node_or_nodes
if not nodes:
return set()
first = first_leaf(nodes[0])
last = last_leaf(nodes[-1])
if first and last:
line_start = first.lineno
line_end = _leaf_line_end(last)
return set(range(line_start, line_end + 1))
else:
return set()
else:
node = node_or_nodes
if isinstance(node, Leaf):
return set(range(node.lineno, _leaf_line_end(node) + 1))
else:
first = first_leaf(node)
last = last_leaf(node)
if first and last:
return set(range(first.lineno, _leaf_line_end(last) + 1))
else:
return set()
@dataclass
class _LinesMapping:
original_start: int
original_end: int
modified_start: int
modified_end: int
# Whether this range corresponds to a changed block, or an unchanged block.
is_changed_block: bool
def _calculate_lines_mappings(
original_source: str,
modified_source: str,
) -> Sequence[_LinesMapping]:
matcher = difflib.SequenceMatcher(
None,
original_source.splitlines(keepends=True),
modified_source.splitlines(keepends=True),
)
matching_blocks = matcher.get_matching_blocks()
lines_mappings: list[_LinesMapping] = []
# matching_blocks is a sequence of "same block of code ranges", see
# https://docs.python.org/3/library/difflib.html#difflib.SequenceMatcher.get_matching_blocks
# Each block corresponds to a _LinesMapping with is_changed_block=False,
# and the ranges between two blocks corresponds to a _LinesMapping with
# is_changed_block=True,
# NOTE: matching_blocks is 0-based, but _LinesMapping is 1-based.
for i, block in enumerate(matching_blocks):
if i == 0:
if block.a != 0 or block.b != 0:
lines_mappings.append(
_LinesMapping(
original_start=1,
original_end=block.a,
modified_start=1,
modified_end=block.b,
is_changed_block=False,
)
)
else:
previous_block = matching_blocks[i - 1]
lines_mappings.append(
_LinesMapping(
original_start=previous_block.a + previous_block.size + 1,
original_end=block.a,
modified_start=previous_block.b + previous_block.size + 1,
modified_end=block.b,
is_changed_block=True,
)
)
if i < len(matching_blocks) - 1:
lines_mappings.append(
_LinesMapping(
original_start=block.a + 1,
original_end=block.a + block.size,
modified_start=block.b + 1,
modified_end=block.b + block.size,
is_changed_block=False,
)
)
return lines_mappings
def _find_lines_mapping_index(
original_line: int,
lines_mappings: Sequence[_LinesMapping],
start_index: int,
) -> int:
index = start_index
while index < len(lines_mappings):
mapping = lines_mappings[index]
if mapping.original_start <= original_line <= mapping.original_end:
return index
index += 1
return index | --- +++ @@ -1,3 +1,4 @@+"""Functions related to Black's formatting by line ranges feature."""
import difflib
from collections.abc import Collection, Iterator, Sequence
@@ -40,12 +41,20 @@
def is_valid_line_range(lines: tuple[int, int]) -> bool:
+ """Returns whether the line range is valid."""
return not lines or lines[0] <= lines[1]
def sanitized_lines(
lines: Collection[tuple[int, int]], src_contents: str
) -> Collection[tuple[int, int]]:
+ """Returns the valid line ranges for the given source.
+
+ This removes ranges that are entirely outside the valid lines.
+
+ Other ranges are normalized so that the start values are at least 1 and the
+ end values are at most the (1-based) index of the last source line.
+ """
if not src_contents:
return []
good_lines = []
@@ -69,6 +78,38 @@ original_source: str,
modified_source: str,
) -> list[tuple[int, int]]:
+ """Returns the adjusted line ranges based on edits from the original code.
+
+ This computes the new line ranges by diffing original_source and
+ modified_source, and adjust each range based on how the range overlaps with
+ the diffs.
+
+ Note the diff can contain lines outside of the original line ranges. This can
+ happen when the formatting has to be done in adjacent to maintain consistent
+ local results. For example:
+
+ 1. def my_func(arg1, arg2,
+ 2. arg3,):
+ 3. pass
+
+ If it restricts to line 2-2, it can't simply reformat line 2, it also has
+ to reformat line 1:
+
+ 1. def my_func(
+ 2. arg1,
+ 3. arg2,
+ 4. arg3,
+ 5. ):
+ 6. pass
+
+ In this case, we will expand the line ranges to also include the whole diff
+ block.
+
+ Args:
+ lines: a collection of line ranges.
+ original_source: the original source.
+ modified_source: the modified source.
+ """
lines_mappings = _calculate_lines_mappings(original_source, modified_source)
new_lines = []
@@ -113,6 +154,29 @@
def convert_unchanged_lines(src_node: Node, lines: Collection[tuple[int, int]]) -> None:
+ r"""Converts unchanged lines to STANDALONE_COMMENT.
+
+ The idea is similar to how `# fmt: on/off` is implemented. It also converts the
+ nodes between those markers as a single `STANDALONE_COMMENT` leaf node with
+ the unformatted code as its value. `STANDALONE_COMMENT` is a "fake" token
+ that will be formatted as-is with its prefix normalized.
+
+ Here we perform two passes:
+
+ 1. Visit the top-level statements, and convert them to a single
+ `STANDALONE_COMMENT` when unchanged. This speeds up formatting when some
+ of the top-level statements aren't changed.
+ 2. Convert unchanged "unwrapped lines" to `STANDALONE_COMMENT` nodes line by
+ line. "unwrapped lines" are divided by the `NEWLINE` token. e.g. a
+ multi-line statement is *one* "unwrapped line" that ends with `NEWLINE`,
+ even though this statement itself can span multiple lines, and the
+ tokenizer only sees the last '\n' as the `NEWLINE` token.
+
+ NOTE: During pass (2), comment prefixes and indentations are ALWAYS
+ normalized even when the lines aren't changed. This is fixable by moving
+ more formatting to pass (1). However, it's hard to get it correct when
+ incorrect indentations are used. So we defer this to future optimizations.
+ """
lines_set: set[int] = set()
for start, end in lines:
lines_set.update(range(start, end + 1))
@@ -132,6 +196,14 @@
class _TopLevelStatementsVisitor(Visitor[None]):
+ """
+ A node visitor that converts unchanged top-level statements to
+ STANDALONE_COMMENT.
+
+ This is used in addition to _convert_unchanged_line_by_line, to
+ speed up formatting when there are unchanged top-level
+ classes/functions/statements.
+ """
def __init__(self, lines_set: set[int]):
self._lines_set = lines_set
@@ -178,6 +250,7 @@
def _convert_unchanged_line_by_line(node: Node, lines_set: set[int]) -> None:
+ """Converts unchanged to STANDALONE_COMMENT line by line."""
for leaf in node.leaves():
if leaf.type != NEWLINE:
# We only consider "unwrapped lines", which are divided by the NEWLINE
@@ -231,6 +304,7 @@
def _convert_node_to_standalone_comment(node: LN) -> None:
+ """Convert node to STANDALONE_COMMENT by modifying the tree inline."""
parent = node.parent
if not parent:
return
@@ -283,6 +357,7 @@
def _convert_nodes_to_standalone_comment(nodes: Sequence[LN], *, newline: Leaf) -> None:
+ """Convert nodes to STANDALONE_COMMENT by modifying the tree inline."""
if not nodes:
return
parent = nodes[0].parent
@@ -312,6 +387,7 @@
def _leaf_line_end(leaf: Leaf) -> int:
+ """Returns the line number of the leaf node's last line."""
if leaf.type == NEWLINE:
return leaf.lineno
else:
@@ -320,6 +396,7 @@
def _get_line_range(node_or_nodes: LN | list[LN]) -> set[int]:
+ """Returns the line range of this node or list of nodes."""
if isinstance(node_or_nodes, list):
nodes = node_or_nodes
if not nodes:
@@ -347,6 +424,13 @@
@dataclass
class _LinesMapping:
+ """1-based lines mapping from original source to modified source.
+
+ Lines [original_start, original_end] from original source
+ are mapped to [modified_start, modified_end].
+
+ The ranges are inclusive on both ends.
+ """
original_start: int
original_end: int
@@ -360,6 +444,32 @@ original_source: str,
modified_source: str,
) -> Sequence[_LinesMapping]:
+ """Returns a sequence of _LinesMapping by diffing the sources.
+
+ For example, given the following diff:
+ import re
+ - def func(arg1,
+ - arg2, arg3):
+ + def func(arg1, arg2, arg3):
+ pass
+ It returns the following mappings:
+ original -> modified
+ (1, 1) -> (1, 1), is_changed_block=False (the "import re" line)
+ (2, 3) -> (2, 2), is_changed_block=True (the diff)
+ (4, 4) -> (3, 3), is_changed_block=False (the "pass" line)
+
+ You can think of this visually as if it brings up a side-by-side diff, and tries
+ to map the line ranges from the left side to the right side:
+
+ (1, 1)->(1, 1) 1. import re 1. import re
+ (2, 3)->(2, 2) 2. def func(arg1, 2. def func(arg1, arg2, arg3):
+ 3. arg2, arg3):
+ (4, 4)->(3, 3) 4. pass 3. pass
+
+ Args:
+ original_source: the original source.
+ modified_source: the modified source.
+ """
matcher = difflib.SequenceMatcher(
None,
original_source.splitlines(keepends=True),
@@ -414,10 +524,11 @@ lines_mappings: Sequence[_LinesMapping],
start_index: int,
) -> int:
+ """Returns the original index of the lines mappings for the original line."""
index = start_index
while index < len(lines_mappings):
mapping = lines_mappings[index]
if mapping.original_start <= original_line <= mapping.original_end:
return index
index += 1
- return index+ return index
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/ranges.py |
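The docstrings added to adjusted_lines and _calculate_lines_mappings above explain how requested line ranges are remapped by diffing the original and formatted sources. The sketch below runs difflib.SequenceMatcher on the same small example used in that docstring; the gaps between the reported matching blocks are what the real code turns into is_changed_block=True mappings (1-based there, 0-based here).

import difflib

original = "import re\ndef func(arg1,\n         arg2, arg3):\n    pass\n"
modified = "import re\ndef func(arg1, arg2, arg3):\n    pass\n"

matcher = difflib.SequenceMatcher(
    None,
    original.splitlines(keepends=True),
    modified.splitlines(keepends=True),
)
for block in matcher.get_matching_blocks():
    # Each block is (a, b, size): `size` equal lines starting at index `a`
    # in the original and index `b` in the modified source.
    print(block)
# Match(a=0, b=0, size=1)   the "import re" line is unchanged
# Match(a=3, b=2, size=1)   the "    pass" line is unchanged
# Match(a=4, b=3, size=0)   sentinel terminator block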
Create docstrings for all classes and functions | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# mypy: allow-untyped-defs, allow-incomplete-defs
from collections.abc import Iterable, Iterator
from typing import Any, Optional, TypeVar, Union
from blib2to3.pgen2.grammar import Grammar
__author__ = "Guido van Rossum <guido@python.org>"
import sys
from io import StringIO
HUGE: int = 0x7FFFFFFF # maximum repeat count, default max
_type_reprs: dict[int, str | int] = {}
def type_repr(type_num: int) -> str | int:
global _type_reprs
if not _type_reprs:
from . import pygram
if not hasattr(pygram, "python_symbols"):
pygram.initialize(cache_dir=None)
# printing tokens is possible but not as useful
# from .pgen2 import token // token.__dict__.items():
for name in dir(pygram.python_symbols):
val = getattr(pygram.python_symbols, name)
if type(val) == int:
_type_reprs[val] = name
return _type_reprs.setdefault(type_num, type_num)
_P = TypeVar("_P", bound="Base")
NL = Union["Node", "Leaf"]
Context = tuple[str, tuple[int, int]]
RawNode = tuple[int, Optional[str], Optional[Context], Optional[list[NL]]]
class Base:
# Default values for instance variables
type: int # int: token number (< 256) or symbol number (>= 256)
parent: Optional["Node"] = None # Parent node pointer, or None
children: list[NL] # List of subnodes
was_changed: bool = False
was_checked: bool = False
def __new__(cls, *args, **kwds):
assert cls is not Base, "Cannot instantiate Base"
return object.__new__(cls)
def __eq__(self, other: Any) -> bool:
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other)
@property
def prefix(self) -> str:
raise NotImplementedError
def _eq(self: _P, other: _P) -> bool:
raise NotImplementedError
def __deepcopy__(self: _P, memo: Any) -> _P:
return self.clone()
def clone(self: _P) -> _P:
raise NotImplementedError
def post_order(self) -> Iterator[NL]:
raise NotImplementedError
def pre_order(self) -> Iterator[NL]:
raise NotImplementedError
def replace(self, new: NL | list[NL]) -> None:
assert self.parent is not None, str(self)
assert new is not None
if not isinstance(new, list):
new = [new]
l_children = []
found = False
for ch in self.parent.children:
if ch is self:
assert not found, (self.parent.children, self, new)
if new is not None:
l_children.extend(new)
found = True
else:
l_children.append(ch)
assert found, (self.children, self, new)
self.parent.children = l_children
self.parent.changed()
self.parent.invalidate_sibling_maps()
for x in new:
x.parent = self.parent
self.parent = None
def get_lineno(self) -> int | None:
node = self
while not isinstance(node, Leaf):
if not node.children:
return None
node = node.children[0]
return node.lineno
def changed(self) -> None:
if self.was_changed:
return
if self.parent:
self.parent.changed()
self.was_changed = True
def remove(self) -> int | None:
if self.parent:
for i, node in enumerate(self.parent.children):
if node is self:
del self.parent.children[i]
self.parent.changed()
self.parent.invalidate_sibling_maps()
self.parent = None
return i
return None
@property
def next_sibling(self) -> NL | None:
if self.parent is None:
return None
if self.parent.next_sibling_map is None:
self.parent.update_sibling_maps()
assert self.parent.next_sibling_map is not None
return self.parent.next_sibling_map[id(self)]
@property
def prev_sibling(self) -> NL | None:
if self.parent is None:
return None
if self.parent.prev_sibling_map is None:
self.parent.update_sibling_maps()
assert self.parent.prev_sibling_map is not None
return self.parent.prev_sibling_map[id(self)]
def leaves(self) -> Iterator["Leaf"]:
for child in self.children:
yield from child.leaves()
def depth(self) -> int:
if self.parent is None:
return 0
return 1 + self.parent.depth()
def get_suffix(self) -> str:
next_sib = self.next_sibling
if next_sib is None:
return ""
prefix = next_sib.prefix
return prefix
class Node(Base):
fixers_applied: list[Any] | None
used_names: set[str] | None
def __init__(
self,
type: int,
children: list[NL],
context: Any | None = None,
prefix: str | None = None,
fixers_applied: list[Any] | None = None,
) -> None:
assert type >= 256, type
self.type = type
self.children = list(children)
for ch in self.children:
assert ch.parent is None, repr(ch)
ch.parent = self
self.invalidate_sibling_maps()
if prefix is not None:
self.prefix = prefix
if fixers_applied:
self.fixers_applied = fixers_applied[:]
else:
self.fixers_applied = None
def __repr__(self) -> str:
assert self.type is not None
return f"{self.__class__.__name__}({type_repr(self.type)}, {self.children!r})"
def __str__(self) -> str:
return "".join(map(str, self.children))
def _eq(self, other: Base) -> bool:
return (self.type, self.children) == (other.type, other.children)
def clone(self) -> "Node":
assert self.type is not None
"""Return a cloned (deep) copy of self."""
return Node(
self.type,
[ch.clone() for ch in self.children],
fixers_applied=self.fixers_applied,
)
def post_order(self) -> Iterator[NL]:
for child in self.children:
yield from child.post_order()
yield self
def pre_order(self) -> Iterator[NL]:
yield self
for child in self.children:
yield from child.pre_order()
@property
def prefix(self) -> str:
if not self.children:
return ""
return self.children[0].prefix
@prefix.setter
def prefix(self, prefix: str) -> None:
if self.children:
self.children[0].prefix = prefix
def set_child(self, i: int, child: NL) -> None:
child.parent = self
self.children[i].parent = None
self.children[i] = child
self.changed()
self.invalidate_sibling_maps()
def insert_child(self, i: int, child: NL) -> None:
child.parent = self
self.children.insert(i, child)
self.changed()
self.invalidate_sibling_maps()
def append_child(self, child: NL) -> None:
child.parent = self
self.children.append(child)
self.changed()
self.invalidate_sibling_maps()
def invalidate_sibling_maps(self) -> None:
self.prev_sibling_map: dict[int, NL | None] | None = None
self.next_sibling_map: dict[int, NL | None] | None = None
def update_sibling_maps(self) -> None:
_prev: dict[int, NL | None] = {}
_next: dict[int, NL | None] = {}
self.prev_sibling_map = _prev
self.next_sibling_map = _next
previous: NL | None = None
for current in self.children:
_prev[id(current)] = previous
_next[id(previous)] = current
previous = current
_next[id(current)] = None
class Leaf(Base):
# Default values for instance variables
value: str
fixers_applied: list[Any]
bracket_depth: int
# Changed later in brackets.py
opening_bracket: Optional["Leaf"] = None
used_names: set[str] | None
_prefix = "" # Whitespace and comments preceding this token in the input
lineno: int = 0 # Line where this token starts in the input
column: int = 0 # Column where this token starts in the input
# If not None, this Leaf is created by converting a block of fmt off/skip
# code, and `fmt_pass_converted_first_leaf` points to the first Leaf in the
# converted code.
fmt_pass_converted_first_leaf: Optional["Leaf"] = None
def __init__(
self,
type: int,
value: str,
context: Context | None = None,
prefix: str | None = None,
fixers_applied: list[Any] = [],
opening_bracket: Optional["Leaf"] = None,
fmt_pass_converted_first_leaf: Optional["Leaf"] = None,
) -> None:
assert 0 <= type < 256, type
if context is not None:
self._prefix, (self.lineno, self.column) = context
self.type = type
self.value = value
if prefix is not None:
self._prefix = prefix
self.fixers_applied: list[Any] | None = fixers_applied[:]
self.children = []
self.opening_bracket = opening_bracket
self.fmt_pass_converted_first_leaf = fmt_pass_converted_first_leaf
def __repr__(self) -> str:
from .pgen2.token import tok_name
assert self.type is not None
return (
f"{self.__class__.__name__}({tok_name.get(self.type, self.type)},"
f" {self.value!r})"
)
def __str__(self) -> str:
return self._prefix + str(self.value)
def _eq(self, other: "Leaf") -> bool:
return (self.type, self.value) == (other.type, other.value)
def clone(self) -> "Leaf":
assert self.type is not None
"""Return a cloned (deep) copy of self."""
return Leaf(
self.type,
self.value,
(self.prefix, (self.lineno, self.column)),
fixers_applied=self.fixers_applied,
)
def leaves(self) -> Iterator["Leaf"]:
yield self
def post_order(self) -> Iterator["Leaf"]:
yield self
def pre_order(self) -> Iterator["Leaf"]:
yield self
@property
def prefix(self) -> str:
return self._prefix
@prefix.setter
def prefix(self, prefix: str) -> None:
self.changed()
self._prefix = prefix
def convert(gr: Grammar, raw_node: RawNode) -> NL:
type, value, context, children = raw_node
if children or type in gr.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
assert children is not None
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
return Leaf(type, value or "", context=context)
_Results = dict[str, NL]
class BasePattern:
# Defaults for instance variables
type: int | None
type = None # Node type (token if < 256, symbol if >= 256)
content: Any = None # Optional content matching pattern
name: str | None = None # Optional name used to store match in results dict
def __new__(cls, *args, **kwds):
assert cls is not BasePattern, "Cannot instantiate BasePattern"
return object.__new__(cls)
def __repr__(self) -> str:
assert self.type is not None
args = [type_repr(self.type), self.content, self.name]
while args and args[-1] is None:
del args[-1]
return f"{self.__class__.__name__}({', '.join(map(repr, args))})"
def _submatch(self, node, results=None) -> bool:
raise NotImplementedError
def optimize(self) -> "BasePattern":
return self
def match(self, node: NL, results: _Results | None = None) -> bool:
if self.type is not None and node.type != self.type:
return False
if self.content is not None:
r: _Results | None = None
if results is not None:
r = {}
if not self._submatch(node, r):
return False
if r:
assert results is not None
results.update(r)
if results is not None and self.name:
results[self.name] = node
return True
def match_seq(self, nodes: list[NL], results: _Results | None = None) -> bool:
if len(nodes) != 1:
return False
return self.match(nodes[0], results)
def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
r: _Results = {}
if nodes and self.match(nodes[0], r):
yield 1, r
class LeafPattern(BasePattern):
def __init__(
self,
type: int | None = None,
content: str | None = None,
name: str | None = None,
) -> None:
if type is not None:
assert 0 <= type < 256, type
if content is not None:
assert isinstance(content, str), repr(content)
self.type = type
self.content = content
self.name = name
def match(self, node: NL, results=None) -> bool:
if not isinstance(node, Leaf):
return False
return BasePattern.match(self, node, results)
def _submatch(self, node, results=None):
return self.content == node.value
class NodePattern(BasePattern):
wildcards: bool = False
def __init__(
self,
type: int | None = None,
content: Iterable[str] | None = None,
name: str | None = None,
) -> None:
if type is not None:
assert type >= 256, type
if content is not None:
assert not isinstance(content, str), repr(content)
newcontent = list(content)
for i, item in enumerate(newcontent):
assert isinstance(item, BasePattern), (i, item)
# I don't even think this code is used anywhere, but it does cause
# unreachable errors from mypy. This function's signature does look
# odd though *shrug*.
if isinstance(item, WildcardPattern): # type: ignore[unreachable]
self.wildcards = True # type: ignore[unreachable]
self.type = type
self.content = newcontent # TODO: this is unbound when content is None
self.name = name
def _submatch(self, node, results=None) -> bool:
if self.wildcards:
for c, r in generate_matches(self.content, node.children):
if c == len(node.children):
if results is not None:
results.update(r)
return True
return False
if len(self.content) != len(node.children):
return False
for subpattern, child in zip(self.content, node.children):
if not subpattern.match(child, results):
return False
return True
class WildcardPattern(BasePattern):
min: int
max: int
def __init__(
self,
content: str | None = None,
min: int = 0,
max: int = HUGE,
name: str | None = None,
) -> None:
assert 0 <= min <= max <= HUGE, (min, max)
if content is not None:
f = lambda s: tuple(s)
wrapped_content = tuple(map(f, content)) # Protect against alterations
# Check sanity of alternatives
assert len(wrapped_content), repr(
wrapped_content
) # Can't have zero alternatives
for alt in wrapped_content:
assert len(alt), repr(alt) # Can't have empty alternatives
self.content = wrapped_content
self.min = min
self.max = max
self.name = name
def optimize(self) -> Any:
subpattern = None
if (
self.content is not None
and len(self.content) == 1
and len(self.content[0]) == 1
):
subpattern = self.content[0][0]
if self.min == 1 and self.max == 1:
if self.content is None:
return NodePattern(name=self.name)
if subpattern is not None and self.name == subpattern.name:
return subpattern.optimize()
if (
self.min <= 1
and isinstance(subpattern, WildcardPattern)
and subpattern.min <= 1
and self.name == subpattern.name
):
return WildcardPattern(
subpattern.content,
self.min * subpattern.min,
self.max * subpattern.max,
subpattern.name,
)
return self
def match(self, node, results=None) -> bool:
return self.match_seq([node], results)
def match_seq(self, nodes, results=None) -> bool:
for c, r in self.generate_matches(nodes):
if c == len(nodes):
if results is not None:
results.update(r)
if self.name:
results[self.name] = list(nodes)
return True
return False
def generate_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
if self.content is None:
# Shortcut for special case (see __init__.__doc__)
for count in range(self.min, 1 + min(len(nodes), self.max)):
r = {}
if self.name:
r[self.name] = nodes[:count]
yield count, r
elif self.name == "bare_name":
yield self._bare_name_matches(nodes)
else:
# The reason for this is that hitting the recursion limit usually
# results in some ugly messages about how RuntimeErrors are being
# ignored. We only have to do this on CPython, though, because other
# implementations don't have this nasty bug in the first place.
if hasattr(sys, "getrefcount"):
save_stderr = sys.stderr
sys.stderr = StringIO()
try:
for count, r in self._recursive_matches(nodes, 0):
if self.name:
r[self.name] = nodes[:count]
yield count, r
except RuntimeError:
# We fall back to the iterative pattern matching scheme if the recursive
# scheme hits the recursion limit.
for count, r in self._iterative_matches(nodes):
if self.name:
r[self.name] = nodes[:count]
yield count, r
finally:
if hasattr(sys, "getrefcount"):
sys.stderr = save_stderr
def _iterative_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
nodelen = len(nodes)
if 0 >= self.min:
yield 0, {}
results = []
# generate matches that use just one alt from self.content
for alt in self.content:
for c, r in generate_matches(alt, nodes):
yield c, r
results.append((c, r))
# for each match, iterate down the nodes
while results:
new_results = []
for c0, r0 in results:
# stop if the entire set of nodes has been matched
if c0 < nodelen and c0 <= self.max:
for alt in self.content:
for c1, r1 in generate_matches(alt, nodes[c0:]):
if c1 > 0:
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
new_results.append((c0 + c1, r))
results = new_results
def _bare_name_matches(self, nodes) -> tuple[int, _Results]:
count = 0
r = {} # type: _Results
done = False
max = len(nodes)
while not done and count < max:
done = True
for leaf in self.content:
if leaf[0].match(nodes[count], r):
count += 1
done = False
break
assert self.name is not None
r[self.name] = nodes[:count]
return count, r
def _recursive_matches(self, nodes, count) -> Iterator[tuple[int, _Results]]:
assert self.content is not None
if count >= self.min:
yield 0, {}
if count < self.max:
for alt in self.content:
for c0, r0 in generate_matches(alt, nodes):
for c1, r1 in self._recursive_matches(nodes[c0:], count + 1):
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
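# Illustrative sketch (editor's addition): a hedged example of the alternative
# syntax described in the comments above.  content=[[p]] means "one alternative
# consisting of the single subpattern p", so with min=0/max=HUGE this behaves
# like p* and yields a match for every prefix length it can cover.
def _example_wildcard_usage(nodes: list[NL]) -> list[tuple[int, _Results]]:
    any_leaf = LeafPattern()  # no type/content: matches any single leaf
    star = WildcardPattern([[any_leaf]], min=0, max=HUGE, name="tail")
    return list(star.generate_matches(nodes))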
class NegatedPattern(BasePattern):
def __init__(self, content: BasePattern | None = None) -> None:
if content is not None:
assert isinstance(content, BasePattern), repr(content)
self.content = content
def match(self, node, results=None) -> bool:
# We never match a node in its entirety
return False
def match_seq(self, nodes, results=None) -> bool:
# We only match an empty sequence of nodes in its entirety
return len(nodes) == 0
def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
if self.content is None:
# Return a match if there is an empty sequence
if len(nodes) == 0:
yield 0, {}
else:
# Return a match if the argument pattern has no matches
for c, r in self.content.generate_matches(nodes):
return
yield 0, {}
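# Illustrative sketch (editor's addition): hedged behaviour of NegatedPattern.
# It never matches a node outright; its generate_matches() yields a single
# zero-width match exactly when the wrapped pattern fails to match.
def _example_negated_pattern() -> None:
    neg = NegatedPattern(LeafPattern(None, "pass"))
    assert not neg.match(Leaf(1, "pass"))
    assert list(neg.generate_matches([Leaf(1, "x")])) == [(0, {})]
    assert list(neg.generate_matches([Leaf(1, "pass")])) == []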
def generate_matches(
patterns: list[BasePattern], nodes: list[NL]
) -> Iterator[tuple[int, _Results]]:
if not patterns:
yield 0, {}
else:
p, rest = patterns[0], patterns[1:]
for c0, r0 in p.generate_matches(nodes):
if not rest:
yield c0, r0
else:
for c1, r1 in generate_matches(rest, nodes[c0:]):
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r | --- +++ @@ -1,6 +1,14 @@ # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
+"""
+Python parse tree definitions.
+
+This is a very concrete parse tree; we need to keep every token and
+even the comments and whitespace between tokens.
+
+There's also a pattern matching implementation here.
+"""
# mypy: allow-untyped-defs, allow-incomplete-defs
@@ -44,6 +52,14 @@
class Base:
+ """
+ Abstract base class for Node and Leaf.
+
+ This provides some default functionality and boilerplate using the
+ template pattern.
+
+ A node may be a subnode of at most one parent.
+ """
# Default values for instance variables
type: int # int: token number (< 256) or symbol number (>= 256)
@@ -53,10 +69,16 @@ was_checked: bool = False
def __new__(cls, *args, **kwds):
+ """Constructor that prevents Base from being instantiated."""
assert cls is not Base, "Cannot instantiate Base"
return object.__new__(cls)
def __eq__(self, other: Any) -> bool:
+ """
+ Compare two nodes for equality.
+
+ This calls the method _eq().
+ """
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other)
@@ -66,21 +88,45 @@ raise NotImplementedError
def _eq(self: _P, other: _P) -> bool:
+ """
+ Compare two nodes for equality.
+
+ This is called by __eq__ and __ne__. It is only called if the two nodes
+ have the same type. This must be implemented by the concrete subclass.
+ Nodes should be considered equal if they have the same structure,
+ ignoring the prefix string and other context information.
+ """
raise NotImplementedError
def __deepcopy__(self: _P, memo: Any) -> _P:
return self.clone()
def clone(self: _P) -> _P:
+ """
+ Return a cloned (deep) copy of self.
+
+ This must be implemented by the concrete subclass.
+ """
raise NotImplementedError
def post_order(self) -> Iterator[NL]:
+ """
+ Return a post-order iterator for the tree.
+
+ This must be implemented by the concrete subclass.
+ """
raise NotImplementedError
def pre_order(self) -> Iterator[NL]:
+ """
+ Return a pre-order iterator for the tree.
+
+ This must be implemented by the concrete subclass.
+ """
raise NotImplementedError
def replace(self, new: NL | list[NL]) -> None:
+ """Replace this node with a new one in the parent."""
assert self.parent is not None, str(self)
assert new is not None
if not isinstance(new, list):
@@ -104,6 +150,7 @@ self.parent = None
def get_lineno(self) -> int | None:
+ """Return the line number which generated the invocant node."""
node = self
while not isinstance(node, Leaf):
if not node.children:
@@ -119,6 +166,10 @@ self.was_changed = True
def remove(self) -> int | None:
+ """
+ Remove the node from the tree. Returns the position of the node in its
+ parent's children before it was removed.
+ """
if self.parent:
for i, node in enumerate(self.parent.children):
if node is self:
@@ -131,6 +182,10 @@
@property
def next_sibling(self) -> NL | None:
+ """
+ The node immediately following the invocant in their parent's children
+ list. If the invocant does not have a next sibling, it is None
+ """
if self.parent is None:
return None
@@ -141,6 +196,10 @@
@property
def prev_sibling(self) -> NL | None:
+ """
+ The node immediately preceding the invocant in their parent's children
+ list. If the invocant does not have a previous sibling, it is None.
+ """
if self.parent is None:
return None
@@ -159,6 +218,10 @@ return 1 + self.parent.depth()
def get_suffix(self) -> str:
+ """
+ Return the string immediately following the invocant node. This is
+ effectively equivalent to node.next_sibling.prefix
+ """
next_sib = self.next_sibling
if next_sib is None:
return ""
@@ -167,6 +230,7 @@
class Node(Base):
+ """Concrete implementation for interior nodes."""
fixers_applied: list[Any] | None
used_names: set[str] | None
@@ -179,6 +243,14 @@ prefix: str | None = None,
fixers_applied: list[Any] | None = None,
) -> None:
+ """
+ Initializer.
+
+ Takes a type constant (a symbol number >= 256), a sequence of
+ child nodes, and an optional context keyword argument.
+
+ As a side effect, the parent pointers of the children are updated.
+ """
assert type >= 256, type
self.type = type
self.children = list(children)
@@ -194,13 +266,20 @@ self.fixers_applied = None
def __repr__(self) -> str:
+ """Return a canonical string representation."""
assert self.type is not None
return f"{self.__class__.__name__}({type_repr(self.type)}, {self.children!r})"
def __str__(self) -> str:
+ """
+ Return a pretty string representation.
+
+ This reproduces the input source exactly.
+ """
return "".join(map(str, self.children))
def _eq(self, other: Base) -> bool:
+ """Compare two nodes for equality."""
return (self.type, self.children) == (other.type, other.children)
def clone(self) -> "Node":
@@ -213,17 +292,22 @@ )
def post_order(self) -> Iterator[NL]:
+ """Return a post-order iterator for the tree."""
for child in self.children:
yield from child.post_order()
yield self
def pre_order(self) -> Iterator[NL]:
+ """Return a pre-order iterator for the tree."""
yield self
for child in self.children:
yield from child.pre_order()
@property
def prefix(self) -> str:
+ """
+ The whitespace and comments preceding this node in the input.
+ """
if not self.children:
return ""
return self.children[0].prefix
@@ -234,6 +318,10 @@ self.children[0].prefix = prefix
def set_child(self, i: int, child: NL) -> None:
+ """
+ Equivalent to 'node.children[i] = child'. This method also sets the
+ child's parent attribute appropriately.
+ """
child.parent = self
self.children[i].parent = None
self.children[i] = child
@@ -241,12 +329,20 @@ self.invalidate_sibling_maps()
def insert_child(self, i: int, child: NL) -> None:
+ """
+ Equivalent to 'node.children.insert(i, child)'. This method also sets
+ the child's parent attribute appropriately.
+ """
child.parent = self
self.children.insert(i, child)
self.changed()
self.invalidate_sibling_maps()
def append_child(self, child: NL) -> None:
+ """
+ Equivalent to 'node.children.append(child)'. This method also sets the
+ child's parent attribute appropriately.
+ """
child.parent = self
self.children.append(child)
self.changed()
@@ -270,6 +366,7 @@
class Leaf(Base):
+ """Concrete implementation for leaf nodes."""
# Default values for instance variables
value: str
@@ -296,6 +393,12 @@ opening_bracket: Optional["Leaf"] = None,
fmt_pass_converted_first_leaf: Optional["Leaf"] = None,
) -> None:
+ """
+ Initializer.
+
+ Takes a type constant (a token number < 256), a string value, and an
+ optional context keyword argument.
+ """
assert 0 <= type < 256, type
if context is not None:
@@ -310,6 +413,7 @@ self.fmt_pass_converted_first_leaf = fmt_pass_converted_first_leaf
def __repr__(self) -> str:
+ """Return a canonical string representation."""
from .pgen2.token import tok_name
assert self.type is not None
@@ -319,9 +423,15 @@ )
def __str__(self) -> str:
+ """
+ Return a pretty string representation.
+
+ This reproduces the input source exactly.
+ """
return self._prefix + str(self.value)
def _eq(self, other: "Leaf") -> bool:
+ """Compare two nodes for equality."""
return (self.type, self.value) == (other.type, other.value)
def clone(self) -> "Leaf":
@@ -338,13 +448,18 @@ yield self
def post_order(self) -> Iterator["Leaf"]:
+ """Return a post-order iterator for the tree."""
yield self
def pre_order(self) -> Iterator["Leaf"]:
+ """Return a pre-order iterator for the tree."""
yield self
@property
def prefix(self) -> str:
+ """
+ The whitespace and comments preceding this token in the input.
+ """
return self._prefix
@prefix.setter
@@ -354,6 +469,13 @@
def convert(gr: Grammar, raw_node: RawNode) -> NL:
+ """
+ Convert raw node information to a Node or Leaf instance.
+
+ This is passed to the parser driver which calls it whenever a reduction of a
+ grammar rule produces a new complete node, so that the tree is built
+ strictly bottom-up.
+ """
type, value, context, children = raw_node
if children or type in gr.number2symbol:
# If there's exactly one child, return that child instead of
@@ -370,6 +492,19 @@
class BasePattern:
+ """
+ A pattern is a tree matching pattern.
+
+ It looks for a specific node type (token or symbol), and
+ optionally for a specific content.
+
+ This is an abstract base class. There are three concrete
+ subclasses:
+
+ - LeafPattern matches a single leaf node;
+ - NodePattern matches a single node (usually non-leaf);
+ - WildcardPattern matches a sequence of nodes of variable length.
+ """
# Defaults for instance variables
type: int | None
@@ -378,6 +513,7 @@ name: str | None = None # Optional name used to store match in results dict
def __new__(cls, *args, **kwds):
+ """Constructor that prevents BasePattern from being instantiated."""
assert cls is not BasePattern, "Cannot instantiate BasePattern"
return object.__new__(cls)
@@ -392,9 +528,24 @@ raise NotImplementedError
def optimize(self) -> "BasePattern":
+ """
+ A subclass can define this as a hook for optimizations.
+
+ Returns either self or another node with the same effect.
+ """
return self
def match(self, node: NL, results: _Results | None = None) -> bool:
+ """
+ Does this pattern exactly match a node?
+
+ Returns True if it matches, False if not.
+
+ If results is not None, it must be a dict which will be
+ updated with the nodes matching named subpatterns.
+
+ Default implementation for non-wildcard patterns.
+ """
if self.type is not None and node.type != self.type:
return False
if self.content is not None:
@@ -411,11 +562,21 @@ return True
def match_seq(self, nodes: list[NL], results: _Results | None = None) -> bool:
+ """
+ Does this pattern exactly match a sequence of nodes?
+
+ Default implementation for non-wildcard patterns.
+ """
if len(nodes) != 1:
return False
return self.match(nodes[0], results)
def generate_matches(self, nodes: list[NL]) -> Iterator[tuple[int, _Results]]:
+ """
+ Generator yielding all matches for this pattern.
+
+ Default implementation for non-wildcard patterns.
+ """
r: _Results = {}
if nodes and self.match(nodes[0], r):
yield 1, r
@@ -428,6 +589,17 @@ content: str | None = None,
name: str | None = None,
) -> None:
+ """
+ Initializer. Takes optional type, content, and name.
+
+ The type, if given must be a token type (< 256). If not given,
+ this matches any *leaf* node; the content may still be required.
+
+ The content, if given, must be a string.
+
+ If a name is given, the matching node is stored in the results
+ dict under that key.
+ """
if type is not None:
assert 0 <= type < 256, type
if content is not None:
@@ -437,11 +609,24 @@ self.name = name
def match(self, node: NL, results=None) -> bool:
+ """Override match() to insist on a leaf node."""
if not isinstance(node, Leaf):
return False
return BasePattern.match(self, node, results)
def _submatch(self, node, results=None):
+ """
+ Match the pattern's content to the node's children.
+
+ This assumes the node type matches and self.content is not None.
+
+ Returns True if it matches, False if not.
+
+ If results is not None, it must be a dict which will be
+ updated with the nodes matching named subpatterns.
+
+ When returning False, the results dict may still be updated.
+ """
return self.content == node.value
@@ -454,6 +639,21 @@ content: Iterable[str] | None = None,
name: str | None = None,
) -> None:
+ """
+ Initializer. Takes optional type, content, and name.
+
+ The type, if given, must be a symbol type (>= 256). If the
+ type is None this matches *any* single node (leaf or not),
+ except if content is not None, in which case it only matches
+ non-leaf nodes that also match the content pattern.
+
+ The content, if not None, must be a sequence of Patterns that
+ must match the node's children exactly. If the content is
+ given, the type must not be None.
+
+ If a name is given, the matching node is stored in the results
+ dict under that key.
+ """
if type is not None:
assert type >= 256, type
if content is not None:
@@ -471,6 +671,18 @@ self.name = name
def _submatch(self, node, results=None) -> bool:
+ """
+ Match the pattern's content to the node's children.
+
+ This assumes the node type matches and self.content is not None.
+
+ Returns True if it matches, False if not.
+
+ If results is not None, it must be a dict which will be
+ updated with the nodes matching named subpatterns.
+
+ When returning False, the results dict may still be updated.
+ """
if self.wildcards:
for c, r in generate_matches(self.content, node.children):
if c == len(node.children):
@@ -487,6 +699,17 @@
class WildcardPattern(BasePattern):
+ """
+ A wildcard pattern can match zero or more nodes.
+
+ This has all the flexibility needed to implement patterns like:
+
+ .* .+ .? .{m,n}
+ (a b c | d e | f)
+ (...)* (...)+ (...)? (...){m,n}
+
+ except it always uses non-greedy matching.
+ """
min: int
max: int
@@ -498,6 +721,28 @@ max: int = HUGE,
name: str | None = None,
) -> None:
+ """
+ Initializer.
+
+ Args:
+ content: optional sequence of subsequences of patterns;
+ if absent, matches one node;
+ if present, each subsequence is an alternative [*]
+ min: optional minimum number of times to match, default 0
+ max: optional maximum number of times to match, default HUGE
+ name: optional name assigned to this match
+
+ [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
+ equivalent to (a b c | d e | f g h); if content is None,
+ this is equivalent to '.' in regular expression terms.
+ The min and max parameters work as follows:
+ min=0, max=maxint: .*
+ min=1, max=maxint: .+
+ min=0, max=1: .?
+ min=1, max=1: .
+ If content is not None, replace the dot with the parenthesized
+ list of alternatives, e.g. (a b c | d e | f g h)*
+ """
assert 0 <= min <= max <= HUGE, (min, max)
if content is not None:
f = lambda s: tuple(s)
@@ -514,6 +759,7 @@ self.name = name
def optimize(self) -> Any:
+ """Optimize certain stacked wildcard patterns."""
subpattern = None
if (
self.content is not None
@@ -541,9 +787,11 @@ return self
def match(self, node, results=None) -> bool:
+ """Does this pattern exactly match a node?"""
return self.match_seq([node], results)
def match_seq(self, nodes, results=None) -> bool:
+ """Does this pattern exactly match a sequence of nodes?"""
for c, r in self.generate_matches(nodes):
if c == len(nodes):
if results is not None:
@@ -554,6 +802,17 @@ return False
def generate_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
+ """
+ Generator yielding matches for a sequence of nodes.
+
+ Args:
+ nodes: sequence of nodes
+
+ Yields:
+ (count, results) tuples where:
+ count: the match comprises nodes[:count];
+ results: dict containing named submatches.
+ """
if self.content is None:
# Shortcut for special case (see __init__.__doc__)
for count in range(self.min, 1 + min(len(nodes), self.max)):
@@ -588,6 +847,7 @@ sys.stderr = save_stderr
def _iterative_matches(self, nodes) -> Iterator[tuple[int, _Results]]:
+ """Helper to iteratively yield the matches."""
nodelen = len(nodes)
if 0 >= self.min:
yield 0, {}
@@ -616,6 +876,7 @@ results = new_results
def _bare_name_matches(self, nodes) -> tuple[int, _Results]:
+ """Special optimized matcher for bare_name."""
count = 0
r = {} # type: _Results
done = False
@@ -632,6 +893,7 @@ return count, r
def _recursive_matches(self, nodes, count) -> Iterator[tuple[int, _Results]]:
+ """Helper to recursively yield the matches."""
assert self.content is not None
if count >= self.min:
yield 0, {}
@@ -647,6 +909,14 @@
class NegatedPattern(BasePattern):
def __init__(self, content: BasePattern | None = None) -> None:
+ """
+ Initializer.
+
+ The argument is either a pattern or None. If it is None, this
+ only matches an empty sequence (effectively '$' in regex
+ lingo). If it is not None, this matches whenever the argument
+ pattern doesn't have any matches.
+ """
if content is not None:
assert isinstance(content, BasePattern), repr(content)
self.content = content
@@ -674,6 +944,18 @@ def generate_matches(
patterns: list[BasePattern], nodes: list[NL]
) -> Iterator[tuple[int, _Results]]:
+ """
+ Generator yielding matches for a sequence of patterns and nodes.
+
+ Args:
+ patterns: a sequence of patterns
+ nodes: a sequence of nodes
+
+ Yields:
+ (count, results) tuples where:
+ count: the entire sequence of patterns matches nodes[:count];
+ results: dict containing named submatches.
+ """
if not patterns:
yield 0, {}
else:
@@ -686,4 +968,4 @@ r = {}
r.update(r0)
r.update(r1)
- yield c0 + c1, r
+ yield c0 + c1, r
| https://raw.githubusercontent.com/psf/black/HEAD/src/blib2to3/pytree.py |
Add documentation for all methods |
import re
from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Callable, Collection, Iterable, Iterator, Sequence
from dataclasses import dataclass
from typing import Any, ClassVar, Final, Literal, TypeVar, Union
from mypy_extensions import trait
from black.comments import contains_pragma_comment
from black.lines import Line, append_leaves
from black.mode import Feature, Mode
from black.nodes import (
CLOSING_BRACKETS,
OPENING_BRACKETS,
STANDALONE_COMMENT,
is_empty_lpar,
is_empty_par,
is_empty_rpar,
is_part_of_annotation,
parent_type,
replace_child,
syms,
)
from black.rusty import Err, Ok, Result
from black.strings import (
assert_is_leaf_string,
count_chars_in_width,
get_string_prefix,
has_triple_quotes,
normalize_string_quotes,
str_width,
)
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf, Node
class CannotTransform(Exception): ...  # raised when a transformer cannot apply
# types
T = TypeVar("T")
LN = Union[Leaf, Node]
Transformer = Callable[[Line, Collection[Feature], Mode], Iterator[Line]]
Index = int
NodeType = int
ParserState = int
StringID = int
TResult = Result[T, CannotTransform] # (T)ransform Result
TMatchResult = TResult[list[Index]]
SPLIT_SAFE_CHARS = frozenset(["\u3001", "\u3002", "\uff0c"]) # East Asian stops
def TErr(err_msg: str) -> Err[CannotTransform]:
cant_transform = CannotTransform(err_msg)
return Err(cant_transform)
# Remove when `simplify_power_operator_hugging` becomes stable.
def hug_power_op(
line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
# Performance optimization to avoid unnecessary Leaf clones and other ops.
for leaf in line.leaves:
if leaf.type == token.DOUBLESTAR:
break
else:
raise CannotTransform("No doublestar token was found in the line.")
def is_simple_lookup(index: int, kind: Literal[1, -1]) -> bool:
# Brackets and parentheses indicate calls, subscripts, etc. ...
# basically stuff that doesn't count as "simple". Only a NAME lookup
# or dotted lookup (eg. NAME.NAME) is OK.
if kind == -1:
return handle_is_simple_look_up_prev(line, index, {token.RPAR, token.RSQB})
else:
return handle_is_simple_lookup_forward(
line, index, {token.LPAR, token.LSQB}
)
def is_simple_operand(index: int, kind: Literal[1, -1]) -> bool:
# An operand is considered "simple" if it's a NAME, a numeric CONSTANT, a simple
# lookup (see above), with or without a preceding unary operator.
start = line.leaves[index]
if start.type in {token.NAME, token.NUMBER}:
return is_simple_lookup(index, kind)
if start.type in {token.PLUS, token.MINUS, token.TILDE}:
if line.leaves[index + 1].type in {token.NAME, token.NUMBER}:
# kind is always one as bases with a preceding unary op will be checked
# for simplicity starting from the next token (so it'll hit the check
# above).
return is_simple_lookup(index + 1, kind=1)
return False
new_line = line.clone()
should_hug = False
for idx, leaf in enumerate(line.leaves):
new_leaf = leaf.clone()
if should_hug:
new_leaf.prefix = ""
should_hug = False
should_hug = (
(0 < idx < len(line.leaves) - 1)
and leaf.type == token.DOUBLESTAR
and is_simple_operand(idx - 1, kind=-1)
and line.leaves[idx - 1].value != "lambda"
and is_simple_operand(idx + 1, kind=1)
)
if should_hug:
new_leaf.prefix = ""
# We have to be careful to make a new line properly:
# - bracket related metadata must be maintained (handled by Line.append)
# - comments need to be copied over, updating the leaf IDs they're attached to
new_line.append(new_leaf, preformatted=True)
for comment_leaf in line.comments_after(leaf):
new_line.append(comment_leaf, preformatted=True)
yield new_line
# Remove when `simplify_power_operator_hugging` becomes stable.
def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: set[int]) -> bool:
contains_disallowed = False
chain = []
while 0 <= index < len(line.leaves):
current = line.leaves[index]
chain.append(current)
if not contains_disallowed and current.type in disallowed:
contains_disallowed = True
if not is_expression_chained(chain):
return not contains_disallowed
index -= 1
return True
# Remove when `simplify_power_operator_hugging` becomes stable.
def handle_is_simple_lookup_forward(
line: Line, index: int, disallowed: set[int]
) -> bool:
while 0 <= index < len(line.leaves):
current = line.leaves[index]
if current.type in disallowed:
return False
if current.type not in {token.NAME, token.DOT} or (
current.type == token.NAME and current.value == "for"
):
# If the current token isn't disallowed, we'll assume this is simple as
# only the disallowed tokens are semantically attached to this lookup
# expression we're checking. Also, stop early if we hit the 'for' bit
# of a comprehension.
return True
index += 1
return True
# Remove when `simplify_power_operator_hugging` becomes stable.
def is_expression_chained(chained_leaves: list[Leaf]) -> bool:
if len(chained_leaves) < 2:
return True
current_leaf = chained_leaves[-1]
past_leaf = chained_leaves[-2]
if past_leaf.type == token.NAME:
return current_leaf.type in {token.DOT}
elif past_leaf.type in {token.RPAR, token.RSQB}:
return current_leaf.type in {token.RSQB, token.RPAR}
elif past_leaf.type in {token.LPAR, token.LSQB}:
return current_leaf.type in {token.NAME, token.LPAR, token.LSQB}
else:
return False
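# Illustrative sketch (editor's addition): a hedged example of the chain check
# above.  Chains are inspected right-to-left, so a DOT directly to the left of a
# NAME keeps a lookup "simple", while an unrelated token such as a comma breaks it.
def _example_expression_chain() -> None:
    name = Leaf(token.NAME, "a")
    dot = Leaf(token.DOT, ".")
    comma = Leaf(token.COMMA, ",")
    assert is_expression_chained([name, dot])
    assert not is_expression_chained([name, comma])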
class StringTransformer(ABC):
__name__: Final = "StringTransformer"
# Ideally this would be a dataclass, but unfortunately mypyc breaks when used with
# `abc.ABC`.
def __init__(self, line_length: int, normalize_strings: bool) -> None:
self.line_length = line_length
self.normalize_strings = normalize_strings
@abstractmethod
def do_match(self, line: Line) -> TMatchResult: ...
@abstractmethod
def do_transform(
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]: ...
def __call__(
self, line: Line, _features: Collection[Feature], _mode: Mode
) -> Iterator[Line]:
# Optimization to avoid calling `self.do_match(...)` when the line does
# not contain any string.
if not any(leaf.type == token.STRING for leaf in line.leaves):
raise CannotTransform("There are no strings in this line.")
match_result = self.do_match(line)
if isinstance(match_result, Err):
cant_transform = match_result.err()
raise CannotTransform(
f"The string transformer {self.__class__.__name__} does not recognize"
" this line as one that it can transform."
) from cant_transform
string_indices = match_result.ok()
for line_result in self.do_transform(line, string_indices):
if isinstance(line_result, Err):
cant_transform = line_result.err()
raise CannotTransform(
"StringTransformer failed while attempting to transform string."
) from cant_transform
line = line_result.ok()
yield line
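# Illustrative sketch (editor's addition): a hedged, minimal concrete transformer
# showing the do_match/do_transform protocol that __call__ drives.  It "matches"
# any line containing a string leaf and yields the line unchanged.
class _IdentityStringTransformer(StringTransformer):
    def do_match(self, line: Line) -> TMatchResult:
        indices = [i for i, leaf in enumerate(line.leaves) if leaf.type == token.STRING]
        return Ok(indices) if indices else TErr("This line has no strings.")

    def do_transform(
        self, line: Line, string_indices: list[int]
    ) -> Iterator[TResult[Line]]:
        yield Ok(line)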
@dataclass
class CustomSplit:
has_prefix: bool
break_idx: int
CustomSplitMapKey = tuple[StringID, str]
@trait
class CustomSplitMapMixin:
_CUSTOM_SPLIT_MAP: ClassVar[dict[CustomSplitMapKey, tuple[CustomSplit, ...]]] = (
defaultdict(tuple)
)
@staticmethod
def _get_key(string: str) -> CustomSplitMapKey:
return (id(string), string)
def add_custom_splits(
self, string: str, custom_splits: Iterable[CustomSplit]
) -> None:
key = self._get_key(string)
self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)
def pop_custom_splits(self, string: str) -> list[CustomSplit]:
key = self._get_key(string)
custom_splits = self._CUSTOM_SPLIT_MAP[key]
del self._CUSTOM_SPLIT_MAP[key]
return list(custom_splits)
def has_custom_splits(self, string: str) -> bool:
key = self._get_key(string)
return key in self._CUSTOM_SPLIT_MAP
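# Illustrative sketch (editor's addition): hedged round trip through the custom
# split bookkeeping above.  StringMerger registers the original substring
# boundaries of a merged string so that StringSplitter can reuse them later.
def _example_custom_split_roundtrip() -> None:
    class _Holder(CustomSplitMapMixin):
        pass

    holder = _Holder()
    merged = '"hello world"'
    holder.add_custom_splits(merged, [CustomSplit(False, 6)])
    assert holder.has_custom_splits(merged)
    assert holder.pop_custom_splits(merged) == [CustomSplit(False, 6)]
    assert not holder.has_custom_splits(merged)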
class StringMerger(StringTransformer, CustomSplitMapMixin):
def do_match(self, line: Line) -> TMatchResult:
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
string_indices = []
idx = 0
while is_valid_index(idx):
leaf = LL[idx]
if (
leaf.type == token.STRING
and is_valid_index(idx + 1)
and LL[idx + 1].type == token.STRING
):
# Let's check if the string group contains an inline comment
# If we have a comment inline, we don't merge the strings
contains_comment = False
i = idx
while is_valid_index(i):
if LL[i].type != token.STRING:
break
if line.comments_after(LL[i]):
contains_comment = True
break
i += 1
if not contains_comment and not is_part_of_annotation(leaf):
string_indices.append(idx)
# Advance to the next non-STRING leaf.
idx += 2
while is_valid_index(idx) and LL[idx].type == token.STRING:
idx += 1
elif leaf.type == token.STRING and "\\\n" in leaf.value:
string_indices.append(idx)
# Advance to the next non-STRING leaf.
idx += 1
while is_valid_index(idx) and LL[idx].type == token.STRING:
idx += 1
else:
idx += 1
if string_indices:
return Ok(string_indices)
else:
return TErr("This line has no strings that need merging.")
def do_transform(
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
new_line = line
rblc_result = self._remove_backslash_line_continuation_chars(
new_line, string_indices
)
if isinstance(rblc_result, Ok):
new_line = rblc_result.ok()
msg_result = self._merge_string_group(new_line, string_indices)
if isinstance(msg_result, Ok):
new_line = msg_result.ok()
if isinstance(rblc_result, Err) and isinstance(msg_result, Err):
msg_cant_transform = msg_result.err()
rblc_cant_transform = rblc_result.err()
cant_transform = CannotTransform(
"StringMerger failed to merge any strings in this line."
)
# Chain the errors together using `__cause__`.
msg_cant_transform.__cause__ = rblc_cant_transform
cant_transform.__cause__ = msg_cant_transform
yield Err(cant_transform)
else:
yield Ok(new_line)
@staticmethod
def _remove_backslash_line_continuation_chars(
line: Line, string_indices: list[int]
) -> TResult[Line]:
LL = line.leaves
indices_to_transform = []
for string_idx in string_indices:
string_leaf = LL[string_idx]
if (
string_leaf.type == token.STRING
and "\\\n" in string_leaf.value
and not has_triple_quotes(string_leaf.value)
):
indices_to_transform.append(string_idx)
if not indices_to_transform:
return TErr(
"Found no string leaves that contain backslash line continuation"
" characters."
)
new_line = line.clone()
new_line.comments = line.comments.copy()
append_leaves(new_line, line, LL)
for string_idx in indices_to_transform:
new_string_leaf = new_line.leaves[string_idx]
new_string_leaf.value = new_string_leaf.value.replace("\\\n", "")
return Ok(new_line)
def _merge_string_group(
self, line: Line, string_indices: list[int]
) -> TResult[Line]:
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
# A dict of {string_idx: tuple[num_of_strings, string_leaf]}.
merged_string_idx_dict: dict[int, tuple[int, Leaf]] = {}
for string_idx in string_indices:
vresult = self._validate_msg(line, string_idx)
if isinstance(vresult, Err):
continue
merged_string_idx_dict[string_idx] = self._merge_one_string_group(
LL, string_idx, is_valid_index
)
if not merged_string_idx_dict:
return TErr("No string group is merged")
# Build the final line ('new_line') that this method will later return.
new_line = line.clone()
previous_merged_string_idx = -1
previous_merged_num_of_strings = -1
for i, leaf in enumerate(LL):
if i in merged_string_idx_dict:
previous_merged_string_idx = i
previous_merged_num_of_strings, string_leaf = merged_string_idx_dict[i]
new_line.append(string_leaf)
if (
previous_merged_string_idx
<= i
< previous_merged_string_idx + previous_merged_num_of_strings
):
for comment_leaf in line.comments_after(leaf):
new_line.append(comment_leaf, preformatted=True)
continue
append_leaves(new_line, line, [leaf])
return Ok(new_line)
def _merge_one_string_group(
self, LL: list[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
) -> tuple[int, Leaf]:
# If the string group is wrapped inside an Atom node, we must make sure
# to later replace that Atom with our new (merged) string leaf.
atom_node = LL[string_idx].parent
# We will place BREAK_MARK in between every two substrings that we
# merge. We will then later go through our final result and use the
# various instances of BREAK_MARK we find to add the right values to
# the custom split map.
BREAK_MARK = "@@@@@ BLACK BREAKPOINT MARKER @@@@@"
QUOTE = LL[string_idx].value[-1]
def make_naked(string: str, string_prefix: str) -> str:
assert_is_leaf_string(string)
if "f" in string_prefix:
f_expressions = [
string[span[0] + 1 : span[1] - 1] # +-1 to get rid of curly braces
for span in iter_fexpr_spans(string)
]
debug_expressions_contain_visible_quotes = any(
re.search(r".*[\'\"].*(?<![!:=])={1}(?!=)(?![^\s:])", expression)
for expression in f_expressions
)
if not debug_expressions_contain_visible_quotes:
# We don't want to toggle visible quotes in debug f-strings, as
# that would modify the AST
string = _toggle_fexpr_quotes(string, QUOTE)
# After quotes toggling, quotes in expressions won't be escaped
# because quotes can't be reused in f-strings. So we can simply
# let the escaping logic below run without knowing f-string
# expressions.
RE_EVEN_BACKSLASHES = r"(?:(?<!\\)(?:\\\\)*)"
naked_string = string[len(string_prefix) + 1 : -1]
naked_string = re.sub(
"(" + RE_EVEN_BACKSLASHES + ")" + QUOTE, r"\1\\" + QUOTE, naked_string
)
return naked_string
# Holds the CustomSplit objects that will later be added to the custom
# split map.
custom_splits = []
# Temporary storage for the 'has_prefix' part of the CustomSplit objects.
prefix_tracker = []
# Sets the 'prefix' variable. This is the prefix that the final merged
# string will have.
next_str_idx = string_idx
prefix = ""
while (
not prefix
and is_valid_index(next_str_idx)
and LL[next_str_idx].type == token.STRING
):
prefix = get_string_prefix(LL[next_str_idx].value).lower()
next_str_idx += 1
# The next loop merges the string group. The final string will be
# contained in 'S'.
#
# The following convenience variables are used:
#
# S: string
# NS: naked string
# SS: next string
# NSS: naked next string
S = ""
NS = ""
num_of_strings = 0
next_str_idx = string_idx
while is_valid_index(next_str_idx) and LL[next_str_idx].type == token.STRING:
num_of_strings += 1
SS = LL[next_str_idx].value
next_prefix = get_string_prefix(SS).lower()
# If this is an f-string group but this substring is not prefixed
# with 'f'...
if "f" in prefix and "f" not in next_prefix:
# Then we must escape any braces contained in this substring.
SS = re.sub(r"(\{|\})", r"\1\1", SS)
NSS = make_naked(SS, next_prefix)
has_prefix = bool(next_prefix)
prefix_tracker.append(has_prefix)
S = prefix + QUOTE + NS + NSS + BREAK_MARK + QUOTE
NS = make_naked(S, prefix)
next_str_idx += 1
# Take a note on the index of the non-STRING leaf.
non_string_idx = next_str_idx
S_leaf = Leaf(token.STRING, S)
if self.normalize_strings:
S_leaf.value = normalize_string_quotes(S_leaf.value)
# Fill the 'custom_splits' list with the appropriate CustomSplit objects.
temp_string = S_leaf.value[len(prefix) + 1 : -1]
for has_prefix in prefix_tracker:
mark_idx = temp_string.find(BREAK_MARK)
assert (
mark_idx >= 0
), "Logic error while filling the custom string breakpoint cache."
temp_string = temp_string[mark_idx + len(BREAK_MARK) :]
breakpoint_idx = mark_idx + (len(prefix) if has_prefix else 0) + 1
custom_splits.append(CustomSplit(has_prefix, breakpoint_idx))
string_leaf = Leaf(token.STRING, S_leaf.value.replace(BREAK_MARK, ""))
if atom_node is not None:
# If not all children of the atom node are merged (this can happen
# when there is a standalone comment in the middle) ...
if non_string_idx - string_idx < len(atom_node.children):
# We need to replace the old STRING leaves with the new string leaf.
first_child_idx = LL[string_idx].remove()
for idx in range(string_idx + 1, non_string_idx):
LL[idx].remove()
if first_child_idx is not None:
atom_node.insert_child(first_child_idx, string_leaf)
else:
# Else replace the atom node with the new string leaf.
replace_child(atom_node, string_leaf)
self.add_custom_splits(string_leaf.value, custom_splits)
return num_of_strings, string_leaf
@staticmethod
def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
# We first check for "inner" stand-alone comments (i.e. stand-alone
# comments that have a string leaf before them AND after them).
for inc in [1, -1]:
i = string_idx
found_sa_comment = False
is_valid_index = is_valid_index_factory(line.leaves)
while is_valid_index(i) and line.leaves[i].type in [
token.STRING,
STANDALONE_COMMENT,
]:
if line.leaves[i].type == STANDALONE_COMMENT:
found_sa_comment = True
elif found_sa_comment:
return TErr(
"StringMerger does NOT merge string groups which contain "
"stand-alone comments."
)
i += inc
QUOTE = line.leaves[string_idx].value[-1]
num_of_inline_string_comments = 0
set_of_prefixes = set()
num_of_strings = 0
for leaf in line.leaves[string_idx:]:
if leaf.type != token.STRING:
# If the string group is trailed by a comma, we count the
# comments trailing the comma to be one of the string group's
# comments.
if leaf.type == token.COMMA and id(leaf) in line.comments:
num_of_inline_string_comments += 1
break
if has_triple_quotes(leaf.value):
return TErr("StringMerger does NOT merge multiline strings.")
num_of_strings += 1
prefix = get_string_prefix(leaf.value).lower()
if "r" in prefix:
return TErr("StringMerger does NOT merge raw strings.")
set_of_prefixes.add(prefix)
if (
"f" in prefix
and leaf.value[-1] != QUOTE
and (
"'" in leaf.value[len(prefix) + 1 : -1]
or '"' in leaf.value[len(prefix) + 1 : -1]
)
):
return TErr(
"StringMerger does NOT merge f-strings with different quote types"
" and internal quotes."
)
if id(leaf) in line.comments:
num_of_inline_string_comments += 1
if contains_pragma_comment(line.comments[id(leaf)]):
return TErr("Cannot merge strings which have pragma comments.")
if num_of_strings < 2:
return TErr(
f"Not enough strings to merge (num_of_strings={num_of_strings})."
)
if num_of_inline_string_comments > 1:
return TErr(
f"Too many inline string comments ({num_of_inline_string_comments})."
)
if len(set_of_prefixes) > 1 and set_of_prefixes != {"", "f"}:
return TErr(f"Too many different prefixes ({set_of_prefixes}).")
return Ok(None)
class StringParenStripper(StringTransformer):
def do_match(self, line: Line) -> TMatchResult:
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
string_indices = []
idx = -1
while True:
idx += 1
if idx >= len(LL):
break
leaf = LL[idx]
# Should be a string...
if leaf.type != token.STRING:
continue
# If this is a "pointless" string...
if (
leaf.parent
and leaf.parent.parent
and leaf.parent.parent.type == syms.simple_stmt
):
continue
# Should be preceded by a non-empty LPAR...
if (
not is_valid_index(idx - 1)
or LL[idx - 1].type != token.LPAR
or is_empty_lpar(LL[idx - 1])
):
continue
# That LPAR should NOT be preceded by a colon (which could be a
# dictionary value), function name, or a closing bracket (which
# could be a function returning a function or a list/dictionary
# containing a function)...
if is_valid_index(idx - 2) and (
LL[idx - 2].type == token.COLON
or LL[idx - 2].type == token.NAME
or LL[idx - 2].type in CLOSING_BRACKETS
):
continue
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
next_idx = string_parser.parse(LL, string_idx)
# if the leaves in the parsed string include a PERCENT, we need to
# make sure the initial LPAR is NOT preceded by an operator with
# higher or equal precedence to PERCENT
if is_valid_index(idx - 2):
# mypy can't quite follow unless we name this
before_lpar = LL[idx - 2]
if token.PERCENT in {leaf.type for leaf in LL[idx - 1 : next_idx]} and (
(
before_lpar.type
in {
token.STAR,
token.AT,
token.SLASH,
token.DOUBLESLASH,
token.PERCENT,
token.TILDE,
token.DOUBLESTAR,
token.AWAIT,
token.LSQB,
token.LPAR,
}
)
or (
# only unary PLUS/MINUS
before_lpar.parent
and before_lpar.parent.type == syms.factor
and (before_lpar.type in {token.PLUS, token.MINUS})
)
):
continue
# Should be followed by a non-empty RPAR...
if (
is_valid_index(next_idx)
and LL[next_idx].type == token.RPAR
and not is_empty_rpar(LL[next_idx])
):
# That RPAR should NOT be followed by anything with higher
# precedence than PERCENT
if is_valid_index(next_idx + 1) and LL[next_idx + 1].type in {
token.DOUBLESTAR,
token.LSQB,
token.LPAR,
token.DOT,
}:
continue
string_indices.append(string_idx)
idx = string_idx
while idx < len(LL) - 1 and LL[idx + 1].type == token.STRING:
idx += 1
if string_indices:
return Ok(string_indices)
return TErr("This line has no strings wrapped in parens.")
def do_transform(
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
LL = line.leaves
string_and_rpar_indices: list[int] = []
for string_idx in string_indices:
string_parser = StringParser()
rpar_idx = string_parser.parse(LL, string_idx)
should_transform = True
for leaf in (LL[string_idx - 1], LL[rpar_idx]):
if line.comments_after(leaf):
# Should not strip parentheses which have comments attached
# to them.
should_transform = False
break
if should_transform:
string_and_rpar_indices.extend((string_idx, rpar_idx))
if string_and_rpar_indices:
yield Ok(self._transform_to_new_line(line, string_and_rpar_indices))
else:
yield Err(
CannotTransform("All string groups have comments attached to them.")
)
def _transform_to_new_line(
self, line: Line, string_and_rpar_indices: list[int]
) -> Line:
LL = line.leaves
new_line = line.clone()
new_line.comments = line.comments.copy()
previous_idx = -1
# We need to sort the indices, since string_idx and its matching
# rpar_idx may not come in order, e.g. in
# `("outer" % ("inner".join(items)))`, the "inner" string's
# string_idx is smaller than "outer" string's rpar_idx.
for idx in sorted(string_and_rpar_indices):
leaf = LL[idx]
lpar_or_rpar_idx = idx - 1 if leaf.type == token.STRING else idx
append_leaves(new_line, line, LL[previous_idx + 1 : lpar_or_rpar_idx])
if leaf.type == token.STRING:
string_leaf = Leaf(token.STRING, LL[idx].value)
LL[lpar_or_rpar_idx].remove() # Remove lpar.
replace_child(LL[idx], string_leaf)
new_line.append(string_leaf)
# replace comments
old_comments = new_line.comments.pop(id(LL[idx]), [])
new_line.comments.setdefault(id(string_leaf), []).extend(old_comments)
else:
LL[lpar_or_rpar_idx].remove() # This is a rpar.
previous_idx = idx
# Append the leaves after the last idx:
append_leaves(new_line, line, LL[idx + 1 :])
return new_line
class BaseStringSplitter(StringTransformer):
STRING_OPERATORS: Final = [
token.EQEQUAL,
token.GREATER,
token.GREATEREQUAL,
token.LESS,
token.LESSEQUAL,
token.NOTEQUAL,
token.PERCENT,
token.PLUS,
token.STAR,
]
@abstractmethod
def do_splitter_match(self, line: Line) -> TMatchResult: ...
def do_match(self, line: Line) -> TMatchResult:
match_result = self.do_splitter_match(line)
if isinstance(match_result, Err):
return match_result
string_indices = match_result.ok()
assert len(string_indices) == 1, (
f"{self.__class__.__name__} should only find one match at a time, found"
f" {len(string_indices)}"
)
string_idx = string_indices[0]
vresult = self._validate(line, string_idx)
if isinstance(vresult, Err):
return vresult
return match_result
def _validate(self, line: Line, string_idx: int) -> TResult[None]:
LL = line.leaves
string_leaf = LL[string_idx]
max_string_length = self._get_max_string_length(line, string_idx)
if len(string_leaf.value) <= max_string_length:
return TErr(
"The string itself is not what is causing this line to be too long."
)
if not string_leaf.parent or [L.type for L in string_leaf.parent.children] == [
token.STRING,
token.NEWLINE,
]:
return TErr(
f"This string ({string_leaf.value}) appears to be pointless (i.e. has"
" no parent)."
)
if id(line.leaves[string_idx]) in line.comments and contains_pragma_comment(
line.comments[id(line.leaves[string_idx])]
):
return TErr(
"Line appears to end with an inline pragma comment. Splitting the line"
" could modify the pragma's behavior."
)
if has_triple_quotes(string_leaf.value):
return TErr("We cannot split multiline strings.")
return Ok(None)
def _get_max_string_length(self, line: Line, string_idx: int) -> int:
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
# We use the shorthand "WMA4" in comments to abbreviate "We must
# account for". When giving examples, we use STRING to mean some/any
# valid string.
#
# Finally, we use the following convenience variables:
#
# P: The leaf that is before the target string leaf.
# N: The leaf that is after the target string leaf.
# NN: The leaf that is after N.
# WMA4 the whitespace at the beginning of the line.
offset = line.depth * 4
if is_valid_index(string_idx - 1):
p_idx = string_idx - 1
if (
LL[string_idx - 1].type == token.LPAR
and LL[string_idx - 1].value == ""
and string_idx >= 2
):
# If the previous leaf is an empty LPAR placeholder, we should skip it.
p_idx -= 1
P = LL[p_idx]
if P.type in self.STRING_OPERATORS:
# WMA4 a space and a string operator (e.g. `+ STRING` or `== STRING`).
offset += len(str(P)) + 1
if P.type == token.COMMA:
# WMA4 a space, a comma, and a closing bracket [e.g. `), STRING`].
offset += 3
if P.type in [token.COLON, token.EQUAL, token.PLUSEQUAL, token.NAME]:
# This conditional branch is meant to handle dictionary keys,
# variable assignments, 'return STRING' statement lines, and
# 'else STRING' ternary expression lines.
# WMA4 a single space.
offset += 1
# WMA4 the lengths of any leaves that came before that space,
# but after any closing bracket before that space.
for leaf in reversed(LL[: p_idx + 1]):
offset += len(str(leaf))
if leaf.type in CLOSING_BRACKETS:
break
if is_valid_index(string_idx + 1):
N = LL[string_idx + 1]
if N.type == token.RPAR and N.value == "" and len(LL) > string_idx + 2:
# If the next leaf is an empty RPAR placeholder, we should skip it.
N = LL[string_idx + 2]
if N.type == token.COMMA:
# WMA4 a single comma at the end of the string (e.g `STRING,`).
offset += 1
if is_valid_index(string_idx + 2):
NN = LL[string_idx + 2]
if N.type == token.DOT and NN.type == token.NAME:
# This conditional branch is meant to handle method calls invoked
# off of a string literal up to and including the LPAR character.
# WMA4 the '.' character.
offset += 1
if (
is_valid_index(string_idx + 3)
and LL[string_idx + 3].type == token.LPAR
):
# WMA4 the left parenthesis character.
offset += 1
# WMA4 the length of the method's name.
offset += len(NN.value)
has_comments = False
for comment_leaf in line.comments_after(LL[string_idx]):
if not has_comments:
has_comments = True
# WMA4 two spaces before the '#' character.
offset += 2
# WMA4 the length of the inline comment.
offset += len(comment_leaf.value)
max_string_length = count_chars_in_width(str(line), self.line_length - offset)
return max_string_length
@staticmethod
def _prefer_paren_wrap_match(LL: list[Leaf]) -> int | None:
# The line must start with a string.
if LL[0].type != token.STRING:
return None
matching_nodes = [
syms.listmaker,
syms.dictsetmaker,
syms.testlist_gexp,
]
# If the string is an immediate child of a list/set/tuple literal...
if (
parent_type(LL[0]) in matching_nodes
or parent_type(LL[0].parent) in matching_nodes
):
# And the string is surrounded by commas (or is the first/last child)...
prev_sibling = LL[0].prev_sibling
next_sibling = LL[0].next_sibling
if (
not prev_sibling
and not next_sibling
and parent_type(LL[0]) == syms.atom
):
# If it's an atom string, we need to check the parent atom's siblings.
parent = LL[0].parent
assert parent is not None # For type checkers.
prev_sibling = parent.prev_sibling
next_sibling = parent.next_sibling
if (not prev_sibling or prev_sibling.type == token.COMMA) and (
not next_sibling or next_sibling.type == token.COMMA
):
return 0
return None
def iter_fexpr_spans(s: str) -> Iterator[tuple[int, int]]:
stack: list[int] = [] # our curly paren stack
i = 0
while i < len(s):
if s[i] == "{":
# if we're in a string part of the f-string, ignore escaped curly braces
if not stack and i + 1 < len(s) and s[i + 1] == "{":
i += 2
continue
stack.append(i)
i += 1
continue
if s[i] == "}":
if not stack:
i += 1
continue
j = stack.pop()
# we've made it back out of the expression! yield the span
if not stack:
yield (j, i + 1)
i += 1
continue
# if we're in an expression part of the f-string, fast-forward through strings
# note that backslashes are not legal in the expression portion of f-strings
if stack:
delim = None
if s[i : i + 3] in ("'''", '"""'):
delim = s[i : i + 3]
elif s[i] in ("'", '"'):
delim = s[i]
if delim:
i += len(delim)
while i < len(s) and s[i : i + len(delim)] != delim:
i += 1
i += len(delim)
continue
i += 1
def fstring_contains_expr(s: str) -> bool:
return any(iter_fexpr_spans(s))
def _toggle_fexpr_quotes(fstring: str, old_quote: str) -> str:
new_quote = "'" if old_quote == '"' else '"'
parts = []
previous_index = 0
for start, end in iter_fexpr_spans(fstring):
parts.append(fstring[previous_index:start])
parts.append(fstring[start:end].replace(old_quote, new_quote))
previous_index = end
parts.append(fstring[previous_index:])
return "".join(parts)
class StringSplitter(BaseStringSplitter, CustomSplitMapMixin):
MIN_SUBSTR_SIZE: Final = 6
def do_splitter_match(self, line: Line) -> TMatchResult:
LL = line.leaves
if self._prefer_paren_wrap_match(LL) is not None:
return TErr("Line needs to be wrapped in parens first.")
# If the line is just STRING + COMMA (a one-item tuple) and not inside
# brackets, we need to defer to StringParenWrapper to wrap it first.
# Otherwise, splitting the string would create multiple expressions where
# only the last has the comma, breaking AST equivalence. See issue #4912.
if (
not line.inside_brackets
and len(LL) == 2
and LL[0].type == token.STRING
and LL[1].type == token.COMMA
):
return TErr(
"Line with trailing comma tuple needs to be wrapped in parens first."
)
is_valid_index = is_valid_index_factory(LL)
idx = 0
# The first two leaves MAY be the 'not in' keywords...
if (
is_valid_index(idx)
and is_valid_index(idx + 1)
and [LL[idx].type, LL[idx + 1].type] == [token.NAME, token.NAME]
and str(LL[idx]) + str(LL[idx + 1]) == "not in"
):
idx += 2
# Else the first leaf MAY be a string operator symbol or the 'in' keyword...
elif is_valid_index(idx) and (
LL[idx].type in self.STRING_OPERATORS
or LL[idx].type == token.NAME
and str(LL[idx]) == "in"
):
idx += 1
# The next/first leaf MAY be an empty LPAR...
if is_valid_index(idx) and is_empty_lpar(LL[idx]):
idx += 1
# The next/first leaf MUST be a string...
if not is_valid_index(idx) or LL[idx].type != token.STRING:
return TErr("Line does not start with a string.")
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
idx = string_parser.parse(LL, string_idx)
# That string MAY be followed by an empty RPAR...
if is_valid_index(idx) and is_empty_rpar(LL[idx]):
idx += 1
# That string / empty RPAR leaf MAY be followed by a comma...
if is_valid_index(idx) and LL[idx].type == token.COMMA:
idx += 1
# But no more leaves are allowed...
if is_valid_index(idx):
return TErr("This line does not end with a string.")
return Ok([string_idx])
def do_transform(
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
LL = line.leaves
assert len(string_indices) == 1, (
f"{self.__class__.__name__} should only find one match at a time, found"
f" {len(string_indices)}"
)
string_idx = string_indices[0]
QUOTE = LL[string_idx].value[-1]
is_valid_index = is_valid_index_factory(LL)
insert_str_child = insert_str_child_factory(LL[string_idx])
prefix = get_string_prefix(LL[string_idx].value).lower()
# We MAY choose to drop the 'f' prefix from substrings that don't
# contain any f-expressions, but ONLY if the original f-string
# contains at least one f-expression. Otherwise, we will alter the AST
# of the program.
drop_pointless_f_prefix = ("f" in prefix) and fstring_contains_expr(
LL[string_idx].value
)
first_string_line = True
string_op_leaves = self._get_string_operator_leaves(LL)
string_op_leaves_length = (
sum(len(str(prefix_leaf)) for prefix_leaf in string_op_leaves) + 1
if string_op_leaves
else 0
)
def maybe_append_string_operators(new_line: Line) -> None:
maybe_prefix_leaves = string_op_leaves if first_string_line else []
for i, prefix_leaf in enumerate(maybe_prefix_leaves):
replace_child(LL[i], prefix_leaf)
new_line.append(prefix_leaf)
ends_with_comma = (
is_valid_index(string_idx + 1) and LL[string_idx + 1].type == token.COMMA
)
def max_last_string_column() -> int:
result = self.line_length
result -= line.depth * 4
result -= 1 if ends_with_comma else 0
result -= string_op_leaves_length
return result
# --- Calculate Max Break Width (for string value)
# We start with the line length limit
max_break_width = self.line_length
# The last index of a string of length N is N-1.
max_break_width -= 1
# Leading whitespace is not present in the string value (e.g. Leaf.value).
max_break_width -= line.depth * 4
if max_break_width < 0:
yield TErr(
f"Unable to split {LL[string_idx].value} at such high of a line depth:"
f" {line.depth}"
)
return
# Check if StringMerger registered any custom splits.
custom_splits = self.pop_custom_splits(LL[string_idx].value)
# We use them ONLY if none of them would produce lines that exceed the
# line limit.
use_custom_breakpoints = bool(
custom_splits
and all(csplit.break_idx <= max_break_width for csplit in custom_splits)
)
# Temporary storage for the remaining chunk of the string line that
# can't fit onto the line currently being constructed.
rest_value = LL[string_idx].value
def more_splits_should_be_made() -> bool:
if use_custom_breakpoints:
return len(custom_splits) > 1
else:
return str_width(rest_value) > max_last_string_column()
string_line_results: list[Ok[Line]] = []
while more_splits_should_be_made():
if use_custom_breakpoints:
# Custom User Split (manual)
csplit = custom_splits.pop(0)
break_idx = csplit.break_idx
else:
# Algorithmic Split (automatic)
max_bidx = (
count_chars_in_width(rest_value, max_break_width)
- string_op_leaves_length
)
maybe_break_idx = self._get_break_idx(rest_value, max_bidx)
if maybe_break_idx is None:
# If we are unable to algorithmically determine a good split
# and this string has custom splits registered to it, we
# fall back to using them--which means we have to start
# over from the beginning.
if custom_splits:
rest_value = LL[string_idx].value
string_line_results = []
first_string_line = True
use_custom_breakpoints = True
continue
# Otherwise, we stop splitting here.
break
break_idx = maybe_break_idx
# --- Construct `next_value`
next_value = rest_value[:break_idx] + QUOTE
# HACK: The following 'if' statement is a hack to fix the custom
# breakpoint index in the case of either: (a) substrings that were
# f-strings but will have the 'f' prefix removed OR (b) substrings
# that were not f-strings but will now become f-strings because of
# redundant use of the 'f' prefix (i.e. none of the substrings
# contain f-expressions but one or more of them had the 'f' prefix
# anyway; in which case, we will prepend 'f' to _all_ substrings).
#
# There is probably a better way to accomplish what is being done
# here...
#
# If this substring is an f-string, we _could_ remove the 'f'
# prefix, and the current custom split did NOT originally use a
# prefix...
if (
use_custom_breakpoints
and not csplit.has_prefix
and (
# `next_value == prefix + QUOTE` happens when the custom
# split is an empty string.
next_value == prefix + QUOTE
or next_value != self._normalize_f_string(next_value, prefix)
)
):
# Then `csplit.break_idx` will be off by one after removing
# the 'f' prefix.
break_idx += 1
next_value = rest_value[:break_idx] + QUOTE
if drop_pointless_f_prefix:
next_value = self._normalize_f_string(next_value, prefix)
# --- Construct `next_leaf`
next_leaf = Leaf(token.STRING, next_value)
insert_str_child(next_leaf)
self._maybe_normalize_string_quotes(next_leaf)
# --- Construct `next_line`
next_line = line.clone()
maybe_append_string_operators(next_line)
next_line.append(next_leaf)
string_line_results.append(Ok(next_line))
rest_value = prefix + QUOTE + rest_value[break_idx:]
first_string_line = False
yield from string_line_results
if drop_pointless_f_prefix:
rest_value = self._normalize_f_string(rest_value, prefix)
rest_leaf = Leaf(token.STRING, rest_value)
insert_str_child(rest_leaf)
# NOTE: I could not find a test case that verifies that the following
# line is actually necessary, but it seems to be. Otherwise we risk
# not normalizing the last substring, right?
self._maybe_normalize_string_quotes(rest_leaf)
last_line = line.clone()
maybe_append_string_operators(last_line)
# If there are any leaves to the right of the target string...
if is_valid_index(string_idx + 1):
# We use `temp_value` here to determine how long the last line
# would be if we were to append all the leaves to the right of the
# target string to the last string line.
temp_value = rest_value
for leaf in LL[string_idx + 1 :]:
temp_value += str(leaf)
if leaf.type == token.LPAR:
break
# Try to fit them all on the same line with the last substring...
if (
str_width(temp_value) <= max_last_string_column()
or LL[string_idx + 1].type == token.COMMA
):
last_line.append(rest_leaf)
append_leaves(last_line, line, LL[string_idx + 1 :])
yield Ok(last_line)
# Otherwise, place the last substring on one line and everything
# else on a line below that...
else:
last_line.append(rest_leaf)
yield Ok(last_line)
non_string_line = line.clone()
append_leaves(non_string_line, line, LL[string_idx + 1 :])
yield Ok(non_string_line)
# Else the target string was the last leaf...
else:
last_line.append(rest_leaf)
last_line.comments = line.comments.copy()
yield Ok(last_line)
def _iter_nameescape_slices(self, string: str) -> Iterator[tuple[Index, Index]]:
# True - the previous backslash was unescaped
# False - the previous backslash was escaped *or* there was no backslash
previous_was_unescaped_backslash = False
it = iter(enumerate(string))
for idx, c in it:
if c == "\\":
previous_was_unescaped_backslash = not previous_was_unescaped_backslash
continue
if not previous_was_unescaped_backslash or c != "N":
previous_was_unescaped_backslash = False
continue
previous_was_unescaped_backslash = False
begin = idx - 1 # the position of backslash before \N{...}
for idx, c in it:
if c == "}":
end = idx
break
else:
# malformed nameescape expression?
# should have been detected by AST parsing earlier...
raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
yield begin, end
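# Illustrative note (not part of this module): for a leaf string such as
# r"'\N{SNOWMAN} day'" the generator above yields the single pair (1, 11),
# i.e. the indices of the backslash and of the closing brace, so that no
# split index may land inside the \N{...} escape.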
def _iter_fexpr_slices(self, string: str) -> Iterator[tuple[Index, Index]]:
if "f" not in get_string_prefix(string).lower():
return
yield from iter_fexpr_spans(string)
def _get_illegal_split_indices(self, string: str) -> set[Index]:
illegal_indices: set[Index] = set()
iterators = [
self._iter_fexpr_slices(string),
self._iter_nameescape_slices(string),
]
for it in iterators:
for begin, end in it:
illegal_indices.update(range(begin, end))
return illegal_indices
def _get_break_idx(self, string: str, max_break_idx: int) -> int | None:
is_valid_index = is_valid_index_factory(string)
assert is_valid_index(max_break_idx)
assert_is_leaf_string(string)
_illegal_split_indices = self._get_illegal_split_indices(string)
def breaks_unsplittable_expression(i: Index) -> bool:
return i in _illegal_split_indices
def passes_all_checks(i: Index) -> bool:
is_space = string[i] == " "
is_split_safe = is_valid_index(i - 1) and string[i - 1] in SPLIT_SAFE_CHARS
is_not_escaped = True
j = i - 1
while is_valid_index(j) and string[j] == "\\":
is_not_escaped = not is_not_escaped
j -= 1
is_big_enough = (
len(string[i:]) >= self.MIN_SUBSTR_SIZE
and len(string[:i]) >= self.MIN_SUBSTR_SIZE
)
return (
(is_space or is_split_safe)
and is_not_escaped
and is_big_enough
and not breaks_unsplittable_expression(i)
)
# First, we check all indices BELOW @max_break_idx.
break_idx = max_break_idx
while is_valid_index(break_idx - 1) and not passes_all_checks(break_idx):
break_idx -= 1
if not passes_all_checks(break_idx):
# If that fails, we check all indices ABOVE @max_break_idx.
#
# If we are able to find a valid index here, the next line is going
# to be longer than the specified line length, but it's probably
# better than doing nothing at all.
break_idx = max_break_idx + 1
while is_valid_index(break_idx + 1) and not passes_all_checks(break_idx):
break_idx += 1
if not is_valid_index(break_idx) or not passes_all_checks(break_idx):
return None
return break_idx
def _maybe_normalize_string_quotes(self, leaf: Leaf) -> None:
if self.normalize_strings:
leaf.value = normalize_string_quotes(leaf.value)
def _normalize_f_string(self, string: str, prefix: str) -> str:
assert_is_leaf_string(string)
if "f" in prefix and not fstring_contains_expr(string):
new_prefix = prefix.replace("f", "")
temp = string[len(prefix) :]
temp = re.sub(r"\{\{", "{", temp)
temp = re.sub(r"\}\}", "}", temp)
new_string = temp
return f"{new_prefix}{new_string}"
else:
return string
def _get_string_operator_leaves(self, leaves: Iterable[Leaf]) -> list[Leaf]:
LL = list(leaves)
string_op_leaves = []
i = 0
while LL[i].type in self.STRING_OPERATORS + [token.NAME]:
prefix_leaf = Leaf(LL[i].type, str(LL[i]).strip())
string_op_leaves.append(prefix_leaf)
i += 1
return string_op_leaves
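# Illustrative sketch (not part of this module): a self-contained, simplified version
# of the two-phase search performed by StringSplitter._get_break_idx above. We first
# walk left from the ideal index looking for an acceptable split point and, only if
# that fails, walk right and accept a longer-than-ideal line. The acceptance test
# here is a hypothetical stand-in that merely requires a space; the real check also
# rejects escaped characters, f-expressions, \N{...} escapes, and tiny substrings.
def _demo_two_phase_break_idx(text: str, max_break_idx: int) -> int | None:
    def ok(i: int) -> bool:
        # Hypothetical simplified acceptance test: split only at a space.
        return 0 <= i < len(text) and text[i] == " "
    break_idx = max_break_idx
    while break_idx - 1 >= 0 and not ok(break_idx):  # phase 1: scan below the ideal index
        break_idx -= 1
    if not ok(break_idx):
        break_idx = max_break_idx + 1
        while break_idx + 1 < len(text) and not ok(break_idx):  # phase 2: scan above
            break_idx += 1
    return break_idx if ok(break_idx) else None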
class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin):
def do_splitter_match(self, line: Line) -> TMatchResult:
LL = line.leaves
if line.leaves[-1].type in OPENING_BRACKETS:
return TErr(
"Cannot wrap parens around a line that ends in an opening bracket."
)
string_idx = (
self._return_match(LL)
or self._else_match(LL)
or self._assert_match(LL)
or self._assign_match(LL)
or self._dict_or_lambda_match(LL)
)
if string_idx is None:
string_idx = self._trailing_comma_tuple_match(line)
if string_idx is None:
string_idx = self._prefer_paren_wrap_match(LL)
if string_idx is not None:
string_value = line.leaves[string_idx].value
# If the string has neither spaces nor East Asian stops...
if not any(
char == " " or char in SPLIT_SAFE_CHARS for char in string_value
):
# And will still violate the line length limit when split...
max_string_width = self.line_length - ((line.depth + 1) * 4)
if str_width(string_value) > max_string_width:
# And has no associated custom splits...
if not self.has_custom_splits(string_value):
# Then we should NOT put this string on its own line.
return TErr(
"We do not wrap long strings in parentheses when the"
" resultant line would still be over the specified line"
" length and can't be split further by StringSplitter."
)
return Ok([string_idx])
return TErr("This line does not contain any non-atomic strings.")
@staticmethod
def _return_match(LL: list[Leaf]) -> int | None:
# If this line is a part of a return/yield statement and the first leaf
# contains either the "return" or "yield" keywords...
if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
0
].value in ["return", "yield"]:
is_valid_index = is_valid_index_factory(LL)
idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
# The next visible leaf MUST contain a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
return idx
return None
@staticmethod
def _else_match(LL: list[Leaf]) -> int | None:
# If this line is a part of a ternary expression and the first leaf
# contains the "else" keyword...
if (
parent_type(LL[0]) == syms.test
and LL[0].type == token.NAME
and LL[0].value == "else"
):
is_valid_index = is_valid_index_factory(LL)
idx = 2 if is_valid_index(1) and is_empty_par(LL[1]) else 1
# The next visible leaf MUST contain a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
return idx
return None
@staticmethod
def _assert_match(LL: list[Leaf]) -> int | None:
# If this line is a part of an assert statement and the first leaf
# contains the "assert" keyword...
if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert":
is_valid_index = is_valid_index_factory(LL)
for i, leaf in enumerate(LL):
# We MUST find a comma...
if leaf.type == token.COMMA:
idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
# That comma MUST be followed by a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
idx = string_parser.parse(LL, string_idx)
# But no more leaves are allowed...
if not is_valid_index(idx):
return string_idx
return None
@staticmethod
def _assign_match(LL: list[Leaf]) -> int | None:
# If this line is a part of an expression statement or is a function
# argument AND the first leaf contains a variable name...
if (
parent_type(LL[0]) in [syms.expr_stmt, syms.argument, syms.power]
and LL[0].type == token.NAME
):
is_valid_index = is_valid_index_factory(LL)
for i, leaf in enumerate(LL):
# We MUST find either an '=' or '+=' symbol...
if leaf.type in [token.EQUAL, token.PLUSEQUAL]:
idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
# That symbol MUST be followed by a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
idx = string_parser.parse(LL, string_idx)
# The next leaf MAY be a comma iff this line is a part
# of a function argument...
if (
parent_type(LL[0]) == syms.argument
and is_valid_index(idx)
and LL[idx].type == token.COMMA
):
idx += 1
# But no more leaves are allowed...
if not is_valid_index(idx):
return string_idx
return None
@staticmethod
def _dict_or_lambda_match(LL: list[Leaf]) -> int | None:
# If this line is a part of a dictionary key assignment or lambda expression...
parent_types = [parent_type(LL[0]), parent_type(LL[0].parent)]
if syms.dictsetmaker in parent_types or syms.lambdef in parent_types:
is_valid_index = is_valid_index_factory(LL)
for i, leaf in enumerate(LL):
# We MUST find a colon, it can either be dict's or lambda's colon...
if leaf.type == token.COLON and i < len(LL) - 1:
idx = i + 2 if is_empty_par(LL[i + 1]) else i + 1
# That colon MUST be followed by a string...
if is_valid_index(idx) and LL[idx].type == token.STRING:
string_idx = idx
# Skip the string trailer, if one exists.
string_parser = StringParser()
idx = string_parser.parse(LL, string_idx)
# That string MAY be followed by a comma...
if is_valid_index(idx) and LL[idx].type == token.COMMA:
idx += 1
# But no more leaves are allowed...
if not is_valid_index(idx):
return string_idx
return None
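# Illustrative note (not part of this module): examples of the line shapes the
# matchers above target (the long string shown stands in for one that exceeds the
# configured line length):
#     return "some long string"        -> _return_match
#     x = "some long string"           -> _assign_match
#     "key": "some long string",       -> _dict_or_lambda_match (dict value)
# each returning the index of the string leaf so that it can later be paren-wrapped.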
@staticmethod
def _trailing_comma_tuple_match(line: Line) -> int | None:
LL = line.leaves
# Match: STRING followed by COMMA, not inside brackets
if (
not line.inside_brackets
and len(LL) == 2
and LL[0].type == token.STRING
and LL[1].type == token.COMMA
):
return 0
return None
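# Illustrative note (not part of this module): the matcher above targets a bare
# one-item tuple such as
#     x = "some long string that exceeds the configured line length",
# which must be wrapped in parentheses before the string can be split across lines;
# the wrapping keeps the one-item tuple's AST intact.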
def do_transform(
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
LL = line.leaves
assert len(string_indices) == 1, (
f"{self.__class__.__name__} should only find one match at a time, found"
f" {len(string_indices)}"
)
string_idx = string_indices[0]
is_valid_index = is_valid_index_factory(LL)
insert_str_child = insert_str_child_factory(LL[string_idx])
comma_idx = -1
ends_with_comma = False
if LL[comma_idx].type == token.COMMA:
ends_with_comma = True
leaves_to_steal_comments_from = [LL[string_idx]]
if ends_with_comma:
leaves_to_steal_comments_from.append(LL[comma_idx])
# --- First Line
first_line = line.clone()
left_leaves = LL[:string_idx]
# We have to remember to account for (possibly invisible) LPAR and RPAR
# leaves that already wrapped the target string. If these leaves do
# exist, we will replace them with our own LPAR and RPAR leaves.
old_parens_exist = False
if left_leaves and left_leaves[-1].type == token.LPAR:
old_parens_exist = True
leaves_to_steal_comments_from.append(left_leaves[-1])
left_leaves.pop()
append_leaves(first_line, line, left_leaves)
lpar_leaf = Leaf(token.LPAR, "(")
if old_parens_exist:
replace_child(LL[string_idx - 1], lpar_leaf)
else:
insert_str_child(lpar_leaf)
first_line.append(lpar_leaf)
# We throw inline comments that were originally to the right of the
# target string to the top line. They will now be shown to the right of
# the LPAR.
for leaf in leaves_to_steal_comments_from:
for comment_leaf in line.comments_after(leaf):
first_line.append(comment_leaf, preformatted=True)
yield Ok(first_line)
# --- Middle (String) Line
# We only need to yield one (possibly too long) string line, since the
# `StringSplitter` will break it down further if necessary.
string_value = LL[string_idx].value
string_line = Line(
mode=line.mode,
depth=line.depth + 1,
inside_brackets=True,
should_split_rhs=line.should_split_rhs,
magic_trailing_comma=line.magic_trailing_comma,
)
string_leaf = Leaf(token.STRING, string_value)
insert_str_child(string_leaf)
string_line.append(string_leaf)
old_rpar_leaf = None
if is_valid_index(string_idx + 1):
right_leaves = LL[string_idx + 1 :]
if ends_with_comma:
right_leaves.pop()
if old_parens_exist:
assert right_leaves and right_leaves[-1].type == token.RPAR, (
"Apparently, old parentheses do NOT exist?!"
f" (left_leaves={left_leaves}, right_leaves={right_leaves})"
)
old_rpar_leaf = right_leaves.pop()
elif right_leaves and right_leaves[-1].type == token.RPAR:
# Special case for lambda expressions as dict's value, e.g.:
# my_dict = {
# "key": lambda x: f"formatted: {x}",
# }
# After wrapping the dict's value with parentheses, the string is
# followed by a RPAR but its opening bracket is lambda's, not
# the string's:
# "key": (lambda x: f"formatted: {x}"),
opening_bracket = right_leaves[-1].opening_bracket
if opening_bracket is not None and opening_bracket in left_leaves:
index = left_leaves.index(opening_bracket)
if (
0 < index < len(left_leaves) - 1
and left_leaves[index - 1].type == token.COLON
and left_leaves[index + 1].value == "lambda"
):
right_leaves.pop()
append_leaves(string_line, line, right_leaves)
yield Ok(string_line)
# --- Last Line
last_line = line.clone()
last_line.bracket_tracker = first_line.bracket_tracker
new_rpar_leaf = Leaf(token.RPAR, ")")
if old_rpar_leaf is not None:
replace_child(old_rpar_leaf, new_rpar_leaf)
else:
insert_str_child(new_rpar_leaf)
last_line.append(new_rpar_leaf)
# If the target string ended with a comma, we place this comma to the
# right of the RPAR on the last line.
if ends_with_comma:
comma_leaf = Leaf(token.COMMA, ",")
replace_child(LL[comma_idx], comma_leaf)
last_line.append(comma_leaf)
yield Ok(last_line)
class StringParser:
DEFAULT_TOKEN: Final = 20210605
# String Parser States
START: Final = 1
DOT: Final = 2
NAME: Final = 3
PERCENT: Final = 4
SINGLE_FMT_ARG: Final = 5
LPAR: Final = 6
RPAR: Final = 7
DONE: Final = 8
# Lookup Table for Next State
_goto: Final[dict[tuple[ParserState, NodeType], ParserState]] = {
# A string trailer may start with '.' OR '%'.
(START, token.DOT): DOT,
(START, token.PERCENT): PERCENT,
(START, DEFAULT_TOKEN): DONE,
# A '.' MUST be followed by an attribute or method name.
(DOT, token.NAME): NAME,
# A method name MUST be followed by an '(', whereas an attribute name
# is the last symbol in the string trailer.
(NAME, token.LPAR): LPAR,
(NAME, DEFAULT_TOKEN): DONE,
# A '%' symbol can be followed by an '(' or a single argument (e.g. a
# string or variable name).
(PERCENT, token.LPAR): LPAR,
(PERCENT, DEFAULT_TOKEN): SINGLE_FMT_ARG,
# If a '%' symbol is followed by a single argument, that argument is
# the last leaf in the string trailer.
(SINGLE_FMT_ARG, DEFAULT_TOKEN): DONE,
# If present, a ')' symbol is the last symbol in a string trailer.
# (NOTE: LPARS and nested RPARS are not included in this lookup table,
# since they are treated as a special case by the parsing logic in this
# classes' implementation.)
(RPAR, DEFAULT_TOKEN): DONE,
}
def __init__(self) -> None:
self._state = self.START
self._unmatched_lpars = 0
def parse(self, leaves: list[Leaf], string_idx: int) -> int:
assert leaves[string_idx].type == token.STRING
idx = string_idx + 1
while idx < len(leaves) and self._next_state(leaves[idx]):
idx += 1
return idx
def _next_state(self, leaf: Leaf) -> bool:
# We ignore empty LPAR or RPAR leaves.
if is_empty_par(leaf):
return True
next_token = leaf.type
if next_token == token.LPAR:
self._unmatched_lpars += 1
current_state = self._state
# The LPAR parser state is a special case. We will return True until we
# find the matching RPAR token.
if current_state == self.LPAR:
if next_token == token.RPAR:
self._unmatched_lpars -= 1
if self._unmatched_lpars == 0:
self._state = self.RPAR
# Otherwise, we use a lookup table to determine the next state.
else:
# If the lookup table matches the current state to the next
# token, we use the lookup table.
if (current_state, next_token) in self._goto:
self._state = self._goto[current_state, next_token]
else:
# Otherwise, we check if the current state was assigned a
# default.
if (current_state, self.DEFAULT_TOKEN) in self._goto:
self._state = self._goto[current_state, self.DEFAULT_TOKEN]
# If no default has been assigned, then this parser has a logic
# error.
else:
raise RuntimeError(f"{self.__class__.__name__} LOGIC ERROR!")
if self._state == self.DONE:
return False
return True
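# Illustrative sketch (not part of this module): a usage example of the StringParser
# state machine above. The leaves below model a hypothetical line
#     "Some {}.".format(x) + y
# and parsing stops right after the `.format(x)` trailer, i.e. at the `+` leaf.
_demo_trailer_leaves = [
    Leaf(token.STRING, '"Some {}."'),
    Leaf(token.DOT, "."),
    Leaf(token.NAME, "format"),
    Leaf(token.LPAR, "("),
    Leaf(token.NAME, "x"),
    Leaf(token.RPAR, ")"),
    Leaf(token.PLUS, "+"),
    Leaf(token.NAME, "y"),
]
assert StringParser().parse(_demo_trailer_leaves, string_idx=0) == 6  # index of the `+` leaf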
def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]:
string_parent = string_leaf.parent
string_child_idx = string_leaf.remove()
def insert_str_child(child: LN) -> None:
nonlocal string_child_idx
assert string_parent is not None
assert string_child_idx is not None
string_parent.insert_child(string_child_idx, child)
string_child_idx += 1
return insert_str_child
def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]:
def is_valid_index(idx: int) -> bool:
return 0 <= idx < len(seq)
return is_valid_index | --- +++ @@ -1,3 +1,6 @@+"""
+String transformers that can split and merge strings.
+"""
import re
from abc import ABC, abstractmethod
@@ -37,6 +40,7 @@
class CannotTransform(Exception):
+ """Base class for errors raised by Transformers."""
# types
@@ -54,6 +58,10 @@
def TErr(err_msg: str) -> Err[CannotTransform]:
+ """(T)ransform Err
+
+ Convenience function used when working with the TResult type.
+ """
cant_transform = CannotTransform(err_msg)
return Err(cant_transform)
@@ -62,6 +70,7 @@ def hug_power_op(
line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
+ """A transformer which normalizes spacing around power operators."""
# Performance optimization to avoid unnecessary Leaf clones and other ops.
for leaf in line.leaves:
@@ -127,6 +136,11 @@
# Remove when `simplify_power_operator_hugging` becomes stable.
def handle_is_simple_look_up_prev(line: Line, index: int, disallowed: set[int]) -> bool:
+ """
+    Determine is_simple_lookup for the lines prior to the doublestar
+    token. This is required because we need to isolate the chained expression
+    in order to decide whether the bracket or parenthesis belongs to a single expression.
+ """
contains_disallowed = False
chain = []
@@ -147,6 +161,11 @@ def handle_is_simple_lookup_forward(
line: Line, index: int, disallowed: set[int]
) -> bool:
+ """
+    Determine is_simple_lookup for the lines after the doublestar token.
+    This function is simplified to stay consistent with the prior logic; the forward
+    case is more straightforward and does not need to account for chained expressions.
+ """
while 0 <= index < len(line.leaves):
current = line.leaves[index]
if current.type in disallowed:
@@ -167,6 +186,10 @@
# Remove when `simplify_power_operator_hugging` becomes stable.
def is_expression_chained(chained_leaves: list[Leaf]) -> bool:
+ """
+    Determine whether the variable is a chained call
+    (e.g. foo.lookup, foo().lookup, and (foo.lookup()) are all recognized as chained calls).
+ """
if len(chained_leaves) < 2:
return True
@@ -184,6 +207,31 @@
class StringTransformer(ABC):
+ """
+ An implementation of the Transformer protocol that relies on its
+ subclasses overriding the template methods `do_match(...)` and
+ `do_transform(...)`.
+
+ This Transformer works exclusively on strings (for example, by merging
+ or splitting them).
+
+ The following sections can be found among the docstrings of each concrete
+ StringTransformer subclass.
+
+ Requirements:
+ Which requirements must be met of the given Line for this
+ StringTransformer to be applied?
+
+ Transformations:
+ If the given Line meets all of the above requirements, which string
+ transformations can you expect to be applied to it by this
+ StringTransformer?
+
+ Collaborations:
+ What contractual agreements does this StringTransformer have with other
+        StringTransformers? Such collaborations should be eliminated/minimized
+ as much as possible.
+ """
__name__: Final = "StringTransformer"
@@ -195,15 +243,49 @@
@abstractmethod
def do_match(self, line: Line) -> TMatchResult:
+ """
+ Returns:
+ * Ok(string_indices) such that for each index, `line.leaves[index]`
+ is our target string if a match was able to be made. For
+ transformers that don't result in more lines (e.g. StringMerger,
+ StringParenStripper), multiple matches and transforms are done at
+ once to reduce the complexity.
+ OR
+ * Err(CannotTransform), if no match could be made.
+ """
@abstractmethod
def do_transform(
self, line: Line, string_indices: list[int]
) -> Iterator[TResult[Line]]:
+ """
+ Yields:
+ * Ok(new_line) where new_line is the new transformed line.
+ OR
+ * Err(CannotTransform) if the transformation failed for some reason. The
+ `do_match(...)` template method should usually be used to reject
+ the form of the given Line, but in some cases it is difficult to
+ know whether or not a Line meets the StringTransformer's
+ requirements until the transformation is already midway.
+
+ Side Effects:
+ This method should NOT mutate @line directly, but it MAY mutate the
+ Line's underlying Node structure. (WARNING: If the underlying Node
+ structure IS altered, then this method should NOT be allowed to
+ yield an CannotTransform after that point.)
+ """
def __call__(
self, line: Line, _features: Collection[Feature], _mode: Mode
) -> Iterator[Line]:
+ """
+ StringTransformer instances have a call signature that mirrors that of
+ the Transformer type.
+
+ Raises:
+ CannotTransform(...) if the concrete StringTransformer class is unable
+ to transform @line.
+ """
# Optimization to avoid calling `self.do_match(...)` when the line does
# not contain any string.
if not any(leaf.type == token.STRING for leaf in line.leaves):
@@ -232,6 +314,25 @@
@dataclass
class CustomSplit:
+ """A custom (i.e. manual) string split.
+
+ A single CustomSplit instance represents a single substring.
+
+ Examples:
+ Consider the following string:
+ ```
+ "Hi there friend."
+ " This is a custom"
+ f" string {split}."
+ ```
+
+ This string will correspond to the following three CustomSplit instances:
+ ```
+ CustomSplit(False, 16)
+ CustomSplit(False, 17)
+ CustomSplit(True, 16)
+ ```
+ """
has_prefix: bool
break_idx: int
@@ -242,6 +343,11 @@
@trait
class CustomSplitMapMixin:
+ """
+ This mixin class is used to map merged strings to a sequence of
+ CustomSplits, which will then be used to re-split the strings iff none of
+ the resultant substrings go over the configured max line length.
+ """
_CUSTOM_SPLIT_MAP: ClassVar[dict[CustomSplitMapKey, tuple[CustomSplit, ...]]] = (
defaultdict(tuple)
@@ -249,15 +355,37 @@
@staticmethod
def _get_key(string: str) -> CustomSplitMapKey:
+ """
+ Returns:
+ A unique identifier that is used internally to map @string to a
+ group of custom splits.
+ """
return (id(string), string)
def add_custom_splits(
self, string: str, custom_splits: Iterable[CustomSplit]
) -> None:
+ """Custom Split Map Setter Method
+
+ Side Effects:
+ Adds a mapping from @string to the custom splits @custom_splits.
+ """
key = self._get_key(string)
self._CUSTOM_SPLIT_MAP[key] = tuple(custom_splits)
def pop_custom_splits(self, string: str) -> list[CustomSplit]:
+ """Custom Split Map Getter Method
+
+ Returns:
+ * A list of the custom splits that are mapped to @string, if any
+ exist.
+ OR
+ * [], otherwise.
+
+ Side Effects:
+ Deletes the mapping between @string and its associated custom
+ splits (which are returned to the caller).
+ """
key = self._get_key(string)
custom_splits = self._CUSTOM_SPLIT_MAP[key]
@@ -266,11 +394,33 @@ return list(custom_splits)
def has_custom_splits(self, string: str) -> bool:
+ """
+ Returns:
+ True iff @string is associated with a set of custom splits.
+ """
key = self._get_key(string)
return key in self._CUSTOM_SPLIT_MAP
class StringMerger(StringTransformer, CustomSplitMapMixin):
+ """StringTransformer that merges strings together.
+
+ Requirements:
+ (A) The line contains adjacent strings such that ALL of the validation checks
+ listed in StringMerger._validate_msg(...)'s docstring pass.
+ OR
+ (B) The line contains a string which uses line continuation backslashes.
+
+ Transformations:
+        Depending on which of the two requirements above were met, either:
+
+ (A) The string group associated with the target string is merged.
+ OR
+ (B) All line-continuation backslashes are removed from the target string.
+
+ Collaborations:
+ StringMerger provides custom split information to StringSplitter.
+ """
def do_match(self, line: Line) -> TMatchResult:
LL = line.leaves
@@ -355,6 +505,16 @@ def _remove_backslash_line_continuation_chars(
line: Line, string_indices: list[int]
) -> TResult[Line]:
+ """
+ Merge strings that were split across multiple lines using
+ line-continuation backslashes.
+
+ Returns:
+ Ok(new_line), if @line contains backslash line-continuation
+ characters.
+ OR
+ Err(CannotTransform), otherwise.
+ """
LL = line.leaves
indices_to_transform = []
@@ -386,6 +546,18 @@ def _merge_string_group(
self, line: Line, string_indices: list[int]
) -> TResult[Line]:
+ """
+ Merges string groups (i.e. set of adjacent strings).
+
+ Each index from `string_indices` designates one string group's first
+ leaf in `line.leaves`.
+
+ Returns:
+ Ok(new_line), if ALL of the validation checks found in
+ _validate_msg(...) pass.
+ OR
+ Err(CannotTransform), otherwise.
+ """
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
@@ -429,6 +601,15 @@ def _merge_one_string_group(
self, LL: list[Leaf], string_idx: int, is_valid_index: Callable[[int], bool]
) -> tuple[int, Leaf]:
+ """
+ Merges one string group where the first string in the group is
+ `LL[string_idx]`.
+
+ Returns:
+ A tuple of `(num_of_strings, leaf)` where `num_of_strings` is the
+ number of strings merged and `leaf` is the newly merged string
+ to be replaced in the new line.
+ """
# If the string group is wrapped inside an Atom node, we must make sure
# to later replace that Atom with our new (merged) string leaf.
atom_node = LL[string_idx].parent
@@ -442,6 +623,17 @@ QUOTE = LL[string_idx].value[-1]
def make_naked(string: str, string_prefix: str) -> str:
+ """Strip @string (i.e. make it a "naked" string)
+
+ Pre-conditions:
+ * assert_is_leaf_string(@string)
+
+ Returns:
+ A string that is identical to @string except that
+ @string_prefix has been stripped, the surrounding QUOTE
+ characters have been removed, and any remaining QUOTE
+ characters have been escaped.
+ """
assert_is_leaf_string(string)
if "f" in string_prefix:
f_expressions = [
@@ -562,6 +754,30 @@
@staticmethod
def _validate_msg(line: Line, string_idx: int) -> TResult[None]:
+ """Validate (M)erge (S)tring (G)roup
+
+ Transform-time string validation logic for _merge_string_group(...).
+
+ Returns:
+ * Ok(None), if ALL validation checks (listed below) pass.
+ OR
+ * Err(CannotTransform), if any of the following are true:
+ - The target string group does not contain ANY stand-alone comments.
+ - The target string is not in a string group (i.e. it has no
+ adjacent strings).
+ - The string group has more than one inline comment.
+ - The string group has an inline comment that appears to be a pragma.
+ - The set of all string prefixes in the string group is of
+ length greater than one and is not equal to {"", "f"}.
+ - The string group consists of raw strings.
+ - The string group would merge f-strings with different quote types
+ and internal quotes.
+ - The string group is stringified type annotations. We don't want to
+ process stringified type annotations since pyright doesn't support
+ them spanning multiple string values. (NOTE: mypy, pytype, pyre do
+ support them, so we can change if pyright also gains support in the
+ future. See https://github.com/microsoft/pyright/issues/4359.)
+ """
# We first check for "inner" stand-alone comments (i.e. stand-alone
# comments that have a string leaf before them AND after them).
for inc in [1, -1]:
@@ -641,6 +857,25 @@
class StringParenStripper(StringTransformer):
+ """StringTransformer that strips surrounding parentheses from strings.
+
+ Requirements:
+ The line contains a string which is surrounded by parentheses and:
+ - The target string is NOT the only argument to a function call.
+ - The target string is NOT a "pointless" string.
+ - The target string is NOT a dictionary value.
+ - If the target string contains a PERCENT, the brackets are not
+ preceded or followed by an operator with higher precedence than
+ PERCENT.
+
+ Transformations:
+ The parentheses mentioned in the 'Requirements' section are stripped.
+
+ Collaborations:
+ StringParenStripper has its own inherent usefulness, but it is also
+ relied on to clean up the parentheses created by StringParenWrapper (in
+ the event that they are no longer needed).
+ """
def do_match(self, line: Line) -> TMatchResult:
LL = line.leaves
@@ -813,6 +1048,29 @@
class BaseStringSplitter(StringTransformer):
+ """
+ Abstract class for StringTransformers which transform a Line's strings by splitting
+ them or placing them on their own lines where necessary to avoid going over
+ the configured line length.
+
+ Requirements:
+ * The target string value is responsible for the line going over the
+ line length limit. It follows that after all of black's other line
+ split methods have been exhausted, this line (or one of the resulting
+ lines after all line splits are performed) would still be over the
+ line_length limit unless we split this string.
+ AND
+
+ * The target string is NOT a "pointless" string (i.e. a string that has
+ no parent or siblings).
+ AND
+
+ * The target string is not followed by an inline comment that appears
+ to be a pragma.
+ AND
+
+ * The target string is not a multiline (i.e. triple-quote) string.
+ """
STRING_OPERATORS: Final = [
token.EQEQUAL,
@@ -828,6 +1086,14 @@
@abstractmethod
def do_splitter_match(self, line: Line) -> TMatchResult:
+ """
+ BaseStringSplitter asks its clients to override this method instead of
+ `StringTransformer.do_match(...)`.
+
+ Follows the same protocol as `StringTransformer.do_match(...)`.
+
+ Refer to `help(StringTransformer.do_match)` for more information.
+ """
def do_match(self, line: Line) -> TMatchResult:
match_result = self.do_splitter_match(line)
@@ -847,6 +1113,16 @@ return match_result
def _validate(self, line: Line, string_idx: int) -> TResult[None]:
+ """
+ Checks that @line meets all of the requirements listed in this classes'
+ docstring. Refer to `help(BaseStringSplitter)` for a detailed
+ description of those requirements.
+
+ Returns:
+ * Ok(None), if ALL of the requirements are met.
+ OR
+ * Err(CannotTransform), if ANY of the requirements are NOT met.
+ """
LL = line.leaves
string_leaf = LL[string_idx]
@@ -880,6 +1156,20 @@ return Ok(None)
def _get_max_string_length(self, line: Line, string_idx: int) -> int:
+ """
+ Calculates the max string length used when attempting to determine
+ whether or not the target string is responsible for causing the line to
+ go over the line length limit.
+
+ WARNING: This method is tightly coupled to both StringSplitter and
+ (especially) StringParenWrapper. There is probably a better way to
+ accomplish what is being done here.
+
+ Returns:
+ max_string_length: such that `line.leaves[string_idx].value >
+ max_string_length` implies that the target string IS responsible
+ for causing this line to exceed the line length limit.
+ """
LL = line.leaves
is_valid_index = is_valid_index_factory(LL)
@@ -976,6 +1266,15 @@
@staticmethod
def _prefer_paren_wrap_match(LL: list[Leaf]) -> int | None:
+ """
+ Returns:
+ string_idx such that @LL[string_idx] is equal to our target (i.e.
+ matched) string, if this line matches the "prefer paren wrap" statement
+ requirements listed in the 'Requirements' section of the StringParenWrapper
+ class's docstring.
+ OR
+ None, otherwise.
+ """
# The line must start with a string.
if LL[0].type != token.STRING:
return None
@@ -1012,6 +1311,12 @@
def iter_fexpr_spans(s: str) -> Iterator[tuple[int, int]]:
+ """
+ Yields spans corresponding to expressions in a given f-string.
+ Spans are half-open ranges (left inclusive, right exclusive).
+ Assumes the input string is a valid f-string, but will not crash if the input
+ string is invalid.
+ """
stack: list[int] = [] # our curly paren stack
i = 0
while i < len(s):
@@ -1057,6 +1362,18 @@
def _toggle_fexpr_quotes(fstring: str, old_quote: str) -> str:
+ """
+ Toggles quotes used in f-string expressions that are `old_quote`.
+
+ f-string expressions can't contain backslashes, so we need to toggle the
+ quotes if the f-string itself will end up using the same quote. We can
+    simply toggle without escaping because quotes can't be reused in f-string
+ expressions. They will fail to parse.
+
+ NOTE: If PEP 701 is accepted, above statement will no longer be true.
+ Though if quotes can be reused, we can simply reuse them without updates or
+ escaping, once Black figures out how to parse the new grammar.
+ """
new_quote = "'" if old_quote == '"' else '"'
parts = []
previous_index = 0
@@ -1069,6 +1386,42 @@
class StringSplitter(BaseStringSplitter, CustomSplitMapMixin):
+ """
+ StringTransformer that splits "atom" strings (i.e. strings which exist on
+ lines by themselves).
+
+ Requirements:
+ * The line consists ONLY of a single string (possibly prefixed by a
+ string operator [e.g. '+' or '==']), MAYBE a string trailer, and MAYBE
+ a trailing comma.
+ AND
+ * All of the requirements listed in BaseStringSplitter's docstring.
+
+ Transformations:
+ The string mentioned in the 'Requirements' section is split into as
+ many substrings as necessary to adhere to the configured line length.
+
+ In the final set of substrings, no substring should be smaller than
+ MIN_SUBSTR_SIZE characters.
+
+ The string will ONLY be split on spaces (i.e. each new substring should
+ start with a space). Note that the string will NOT be split on a space
+ which is escaped with a backslash.
+
+ If the string is an f-string, it will NOT be split in the middle of an
+ f-expression (e.g. in f"FooBar: {foo() if x else bar()}", {foo() if x
+ else bar()} is an f-expression).
+
+ If the string that is being split has an associated set of custom split
+ records and those custom splits will NOT result in any line going over
+ the configured line length, those custom splits are used. Otherwise the
+ string is split as late as possible (from left-to-right) while still
+ adhering to the transformation rules listed above.
+
+ Collaborations:
+ StringSplitter relies on StringMerger to construct the appropriate
+ CustomSplit objects and add them to the custom split map.
+ """
MIN_SUBSTR_SIZE: Final = 6
@@ -1175,6 +1528,13 @@ )
def maybe_append_string_operators(new_line: Line) -> None:
+ """
+ Side Effects:
+ If @line starts with a string operator and this is the first
+ line we are constructing, this function appends the string
+ operator to @new_line and replaces the old string operator leaf
+ in the node structure. Otherwise this function does nothing.
+ """
maybe_prefix_leaves = string_op_leaves if first_string_line else []
for i, prefix_leaf in enumerate(maybe_prefix_leaves):
replace_child(LL[i], prefix_leaf)
@@ -1185,6 +1545,13 @@ )
def max_last_string_column() -> int:
+ """
+ Returns:
+ The max allowed width of the string value used for the last
+ line we will construct. Note that this value means the width
+ rather than the number of characters (e.g., many East Asian
+ characters expand to two columns).
+ """
result = self.line_length
result -= line.depth * 4
result -= 1 if ends_with_comma else 0
@@ -1219,6 +1586,11 @@ rest_value = LL[string_idx].value
def more_splits_should_be_made() -> bool:
+ """
+ Returns:
+ True iff `rest_value` (the remaining string value from the last
+ split), should be split again.
+ """
if use_custom_breakpoints:
return len(custom_splits) > 1
else:
@@ -1354,6 +1726,12 @@ yield Ok(last_line)
def _iter_nameescape_slices(self, string: str) -> Iterator[tuple[Index, Index]]:
+ r"""
+ Yields:
+ All ranges of @string which, if @string were to be split there,
+ would result in the splitting of an \N{...} expression (which is NOT
+ allowed).
+ """
# True - the previous backslash was unescaped
# False - the previous backslash was escaped *or* there was no backslash
previous_was_unescaped_backslash = False
@@ -1379,6 +1757,12 @@ yield begin, end
def _iter_fexpr_slices(self, string: str) -> Iterator[tuple[Index, Index]]:
+ """
+ Yields:
+ All ranges of @string which, if @string were to be split there,
+ would result in the splitting of an f-expression (which is NOT
+ allowed).
+ """
if "f" not in get_string_prefix(string).lower():
return
yield from iter_fexpr_spans(string)
@@ -1395,6 +1779,29 @@ return illegal_indices
def _get_break_idx(self, string: str, max_break_idx: int) -> int | None:
+ """
+ This method contains the algorithm that StringSplitter uses to
+ determine which character to split each string at.
+
+ Args:
+ @string: The substring that we are attempting to split.
+ @max_break_idx: The ideal break index. We will return this value if it
+ meets all the necessary conditions. In the likely event that it
+ doesn't we will try to find the closest index BELOW @max_break_idx
+ that does. If that fails, we will expand our search by also
+ considering all valid indices ABOVE @max_break_idx.
+
+ Pre-Conditions:
+ * assert_is_leaf_string(@string)
+ * 0 <= @max_break_idx < len(@string)
+
+ Returns:
+ break_idx, if an index is able to be found that meets all of the
+ conditions listed in the 'Transformations' section of this classes'
+ docstring.
+ OR
+ None, otherwise.
+ """
is_valid_index = is_valid_index_factory(string)
assert is_valid_index(max_break_idx)
@@ -1403,9 +1810,19 @@ _illegal_split_indices = self._get_illegal_split_indices(string)
def breaks_unsplittable_expression(i: Index) -> bool:
+ """
+ Returns:
+ True iff returning @i would result in the splitting of an
+ unsplittable expression (which is NOT allowed).
+ """
return i in _illegal_split_indices
def passes_all_checks(i: Index) -> bool:
+ """
+ Returns:
+ True iff ALL of the conditions listed in the 'Transformations'
+ section of this classes' docstring would be met by returning @i.
+ """
is_space = string[i] == " "
is_split_safe = is_valid_index(i - 1) and string[i - 1] in SPLIT_SAFE_CHARS
@@ -1451,6 +1868,18 @@ leaf.value = normalize_string_quotes(leaf.value)
def _normalize_f_string(self, string: str, prefix: str) -> str:
+ """
+ Pre-Conditions:
+ * assert_is_leaf_string(@string)
+
+ Returns:
+ * If @string is an f-string that contains no f-expressions, we
+ return a string identical to @string except that the 'f' prefix
+ has been stripped and all double braces (i.e. '{{' or '}}') have
+ been normalized (i.e. turned into '{' or '}').
+ OR
+ * Otherwise, we return @string.
+ """
assert_is_leaf_string(string)
if "f" in prefix and not fstring_contains_expr(string):
@@ -1478,6 +1907,62 @@
class StringParenWrapper(BaseStringSplitter, CustomSplitMapMixin):
+ """
+ StringTransformer that wraps strings in parens and then splits at the LPAR.
+
+ Requirements:
+ All of the requirements listed in BaseStringSplitter's docstring in
+ addition to the requirements listed below:
+
+ * The line is a return/yield statement, which returns/yields a string.
+ OR
+ * The line is part of a ternary expression (e.g. `x = y if cond else
+ z`) such that the line starts with `else <string>`, where <string> is
+ some string.
+ OR
+ * The line is an assert statement, which ends with a string.
+ OR
+ * The line is an assignment statement (e.g. `x = <string>` or `x +=
+ <string>`) such that the variable is being assigned the value of some
+ string.
+ OR
+ * The line is a dictionary key assignment where some valid key is being
+ assigned the value of some string.
+ OR
+        * The line is a lambda expression and the value is a string.
+ OR
+ * The line starts with an "atom" string that prefers to be wrapped in
+          parens. It's preferred to be wrapped when it is an immediate child of
+ a list/set/tuple literal, AND the string is surrounded by commas (or is
+ the first/last child).
+
+ Transformations:
+ The chosen string is wrapped in parentheses and then split at the LPAR.
+
+ We then have one line which ends with an LPAR and another line that
+ starts with the chosen string. The latter line is then split again at
+ the RPAR. This results in the RPAR (and possibly a trailing comma)
+ being placed on its own line.
+
+ NOTE: If any leaves exist to the right of the chosen string (except
+ for a trailing comma, which would be placed after the RPAR), those
+ leaves are placed inside the parentheses. In effect, the chosen
+ string is not necessarily being "wrapped" by parentheses. We can,
+ however, count on the LPAR being placed directly before the chosen
+ string.
+
+ In other words, StringParenWrapper creates "atom" strings. These
+ can then be split again by StringSplitter, if necessary.
+
+ Collaborations:
+ In the event that a string line split by StringParenWrapper is
+ changed such that it no longer needs to be given its own line,
+ StringParenWrapper relies on StringParenStripper to clean up the
+ parentheses it created.
+
+        For "atom" strings that prefer to be wrapped in parens, it requires
+ StringSplitter to hold the split until the string is wrapped in parens.
+ """
def do_splitter_match(self, line: Line) -> TMatchResult:
LL = line.leaves
@@ -1524,6 +2009,15 @@
@staticmethod
def _return_match(LL: list[Leaf]) -> int | None:
+ """
+ Returns:
+ string_idx such that @LL[string_idx] is equal to our target (i.e.
+ matched) string, if this line matches the return/yield statement
+ requirements listed in the 'Requirements' section of this classes'
+ docstring.
+ OR
+ None, otherwise.
+ """
# If this line is a part of a return/yield statement and the first leaf
# contains either the "return" or "yield" keywords...
if parent_type(LL[0]) in [syms.return_stmt, syms.yield_expr] and LL[
@@ -1540,6 +2034,15 @@
@staticmethod
def _else_match(LL: list[Leaf]) -> int | None:
+ """
+ Returns:
+ string_idx such that @LL[string_idx] is equal to our target (i.e.
+ matched) string, if this line matches the ternary expression
+ requirements listed in the 'Requirements' section of this classes'
+ docstring.
+ OR
+ None, otherwise.
+ """
# If this line is a part of a ternary expression and the first leaf
# contains the "else" keyword...
if (
@@ -1558,6 +2061,15 @@
@staticmethod
def _assert_match(LL: list[Leaf]) -> int | None:
+ """
+ Returns:
+ string_idx such that @LL[string_idx] is equal to our target (i.e.
+ matched) string, if this line matches the assert statement
+ requirements listed in the 'Requirements' section of this classes'
+ docstring.
+ OR
+ None, otherwise.
+ """
# If this line is a part of an assert statement and the first leaf
# contains the "assert" keyword...
if parent_type(LL[0]) == syms.assert_stmt and LL[0].value == "assert":
@@ -1584,6 +2096,15 @@
@staticmethod
def _assign_match(LL: list[Leaf]) -> int | None:
+ """
+ Returns:
+ string_idx such that @LL[string_idx] is equal to our target (i.e.
+ matched) string, if this line matches the assignment statement
+ requirements listed in the 'Requirements' section of this classes'
+ docstring.
+ OR
+ None, otherwise.
+ """
# If this line is a part of an expression statement or is a function
# argument AND the first leaf contains a variable name...
if (
@@ -1622,6 +2143,15 @@
@staticmethod
def _dict_or_lambda_match(LL: list[Leaf]) -> int | None:
+ """
+ Returns:
+ string_idx such that @LL[string_idx] is equal to our target (i.e.
+ matched) string, if this line matches the dictionary key assignment
+ statement or lambda expression requirements listed in the
+ 'Requirements' section of this classes' docstring.
+ OR
+ None, otherwise.
+ """
# If this line is a part of a dictionary key assignment or lambda expression...
parent_types = [parent_type(LL[0]), parent_type(LL[0].parent)]
if syms.dictsetmaker in parent_types or syms.lambdef in parent_types:
@@ -1652,6 +2182,18 @@
@staticmethod
def _trailing_comma_tuple_match(line: Line) -> int | None:
+ """
+ Returns:
+ string_idx such that @line.leaves[string_idx] is equal to our target
+ (i.e. matched) string, if the line is a bare trailing comma tuple
+ (STRING + COMMA) not inside brackets.
+ OR
+ None, otherwise.
+
+ This handles the case from issue #4912 where a long string with a
+ trailing comma (making it a one-item tuple) needs to be wrapped in
+ parentheses before splitting to preserve AST equivalence.
+ """
LL = line.leaves
# Match: STRING followed by COMMA, not inside brackets
if (
@@ -1789,6 +2331,34 @@
class StringParser:
+ """
+ A state machine that aids in parsing a string's "trailer", which can be
+ either non-existent, an old-style formatting sequence (e.g. `% varX` or `%
+ (varX, varY)`), or a method-call / attribute access (e.g. `.format(varX,
+ varY)`).
+
+ NOTE: A new StringParser object MUST be instantiated for each string
+ trailer we need to parse.
+
+ Examples:
+ We shall assume that `line` equals the `Line` object that corresponds
+ to the following line of python code:
+ ```
+ x = "Some {}.".format("String") + some_other_string
+ ```
+
+ Furthermore, we will assume that `string_idx` is some index such that:
+ ```
+ assert line.leaves[string_idx].value == "Some {}."
+ ```
+
+ The following code snippet then holds:
+ ```
+ string_parser = StringParser()
+ idx = string_parser.parse(line.leaves, string_idx)
+ assert line.leaves[idx].type == token.PLUS
+ ```
+ """
DEFAULT_TOKEN: Final = 20210605
@@ -1833,6 +2403,16 @@ self._unmatched_lpars = 0
def parse(self, leaves: list[Leaf], string_idx: int) -> int:
+ """
+ Pre-conditions:
+ * @leaves[@string_idx].type == token.STRING
+
+ Returns:
+ The index directly after the last leaf which is a part of the string
+ trailer, if a "trailer" exists.
+ OR
+ @string_idx + 1, if no string "trailer" exists.
+ """
assert leaves[string_idx].type == token.STRING
idx = string_idx + 1
@@ -1841,6 +2421,18 @@ return idx
def _next_state(self, leaf: Leaf) -> bool:
+ """
+ Pre-conditions:
+ * On the first call to this function, @leaf MUST be the leaf that
+ was directly after the string leaf in question (e.g. if our target
+ string is `line.leaves[i]` then the first call to this method must
+ be `line.leaves[i + 1]`).
+ * On the next call to this function, the leaf parameter passed in
+ MUST be the leaf directly following @leaf.
+
+ Returns:
+ True iff @leaf is a part of the string's trailer.
+ """
# We ignore empty LPAR or RPAR leaves.
if is_empty_par(leaf):
return True
@@ -1881,6 +2473,51 @@
def insert_str_child_factory(string_leaf: Leaf) -> Callable[[LN], None]:
+ """
+ Factory for a convenience function that is used to orphan @string_leaf
+ and then insert multiple new leaves into the same part of the node
+ structure that @string_leaf had originally occupied.
+
+ Examples:
+ Let `string_leaf = Leaf(token.STRING, '"foo"')` and `N =
+ string_leaf.parent`. Assume the node `N` has the following
+ original structure:
+
+ Node(
+ expr_stmt, [
+ Leaf(NAME, 'x'),
+ Leaf(EQUAL, '='),
+ Leaf(STRING, '"foo"'),
+ ]
+ )
+
+ We then run the code snippet shown below.
+ ```
+ insert_str_child = insert_str_child_factory(string_leaf)
+
+ lpar = Leaf(token.LPAR, '(')
+ insert_str_child(lpar)
+
+ bar = Leaf(token.STRING, '"bar"')
+ insert_str_child(bar)
+
+ rpar = Leaf(token.RPAR, ')')
+ insert_str_child(rpar)
+ ```
+
+ After which point, it follows that `string_leaf.parent is None` and
+ the node `N` now has the following structure:
+
+ Node(
+ expr_stmt, [
+ Leaf(NAME, 'x'),
+ Leaf(EQUAL, '='),
+ Leaf(LPAR, '('),
+ Leaf(STRING, '"bar"'),
+ Leaf(RPAR, ')'),
+ ]
+ )
+ """
string_parent = string_leaf.parent
string_child_idx = string_leaf.remove()
@@ -1897,8 +2534,27 @@
def is_valid_index_factory(seq: Sequence[Any]) -> Callable[[int], bool]:
+ """
+ Examples:
+ ```
+ my_list = [1, 2, 3]
+
+ is_valid_index = is_valid_index_factory(my_list)
+
+ assert is_valid_index(0)
+ assert is_valid_index(2)
+
+ assert not is_valid_index(3)
+ assert not is_valid_index(-1)
+ ```
+ """
def is_valid_index(idx: int) -> bool:
+ """
+ Returns:
+            True iff @idx is non-negative AND seq[@idx] does NOT raise an
+ IndexError.
+ """
return 0 <= idx < len(seq)
- return is_valid_index+ return is_valid_index
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/trans.py |
Add clean documentation to messy code | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# mypy: ignore-errors
# Python imports
import re
# Local imports
from blib2to3.pgen2 import grammar, token
class Converter(grammar.Grammar):
def run(self, graminit_h, graminit_c):
self.parse_graminit_h(graminit_h)
self.parse_graminit_c(graminit_c)
self.finish_off()
def parse_graminit_h(self, filename):
try:
f = open(filename)
except OSError as err:
print(f"Can't open {filename}: {err}")
return False
self.symbol2number = {}
self.number2symbol = {}
lineno = 0
for line in f:
lineno += 1
mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
if not mo and line.strip():
print(f"{filename}({lineno}): can't parse {line.strip()}")
else:
symbol, number = mo.groups()
number = int(number)
assert symbol not in self.symbol2number
assert number not in self.number2symbol
self.symbol2number[symbol] = number
self.number2symbol[number] = symbol
return True
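# Illustrative note (not part of this module): the #define regex above turns a
# graminit.h line such as
#     #define single_input 256
# into the pair ("single_input", "256") via
#     re.match(r"^#define\s+(\w+)\s+(\d+)$", line).groups()
# which then populates the symbol2number and number2symbol tables.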
def parse_graminit_c(self, filename):
try:
f = open(filename)
except OSError as err:
print(f"Can't open {filename}: {err}")
return False
# The code below essentially uses f's iterator-ness!
lineno = 0
# Expect the two #include lines
lineno, line = lineno + 1, next(f)
assert line == '#include "pgenheaders.h"\n', (lineno, line)
lineno, line = lineno + 1, next(f)
assert line == '#include "grammar.h"\n', (lineno, line)
# Parse the state definitions
lineno, line = lineno + 1, next(f)
allarcs = {}
states = []
while line.startswith("static arc "):
while line.startswith("static arc "):
mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$", line)
assert mo, (lineno, line)
n, m, k = list(map(int, mo.groups()))
arcs = []
for _ in range(k):
lineno, line = lineno + 1, next(f)
mo = re.match(r"\s+{(\d+), (\d+)},$", line)
assert mo, (lineno, line)
i, j = list(map(int, mo.groups()))
arcs.append((i, j))
lineno, line = lineno + 1, next(f)
assert line == "};\n", (lineno, line)
allarcs[(n, m)] = arcs
lineno, line = lineno + 1, next(f)
mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
assert mo, (lineno, line)
s, t = list(map(int, mo.groups()))
assert s == len(states), (lineno, line)
state = []
for _ in range(t):
lineno, line = lineno + 1, next(f)
mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
assert mo, (lineno, line)
k, n, m = list(map(int, mo.groups()))
arcs = allarcs[n, m]
assert k == len(arcs), (lineno, line)
state.append(arcs)
states.append(state)
lineno, line = lineno + 1, next(f)
assert line == "};\n", (lineno, line)
lineno, line = lineno + 1, next(f)
self.states = states
# Parse the dfas
dfas = {}
mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
assert mo, (lineno, line)
ndfas = int(mo.group(1))
for i in range(ndfas):
lineno, line = lineno + 1, next(f)
mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$', line)
assert mo, (lineno, line)
symbol = mo.group(2)
number, x, y, z = list(map(int, mo.group(1, 3, 4, 5)))
assert self.symbol2number[symbol] == number, (lineno, line)
assert self.number2symbol[number] == symbol, (lineno, line)
assert x == 0, (lineno, line)
state = states[z]
assert y == len(state), (lineno, line)
lineno, line = lineno + 1, next(f)
mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
assert mo, (lineno, line)
first = {}
rawbitset = eval(mo.group(1))
for i, c in enumerate(rawbitset):
byte = ord(c)
for j in range(8):
if byte & (1 << j):
first[i * 8 + j] = 1
dfas[number] = (state, first)
lineno, line = lineno + 1, next(f)
assert line == "};\n", (lineno, line)
self.dfas = dfas
# Parse the labels
labels = []
lineno, line = lineno + 1, next(f)
mo = re.match(r"static label labels\[(\d+)\] = {$", line)
assert mo, (lineno, line)
nlabels = int(mo.group(1))
for i in range(nlabels):
lineno, line = lineno + 1, next(f)
mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
assert mo, (lineno, line)
x, y = mo.groups()
x = int(x)
if y == "0":
y = None
else:
y = eval(y)
labels.append((x, y))
lineno, line = lineno + 1, next(f)
assert line == "};\n", (lineno, line)
self.labels = labels
# Parse the grammar struct
lineno, line = lineno + 1, next(f)
assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
lineno, line = lineno + 1, next(f)
mo = re.match(r"\s+(\d+),$", line)
assert mo, (lineno, line)
ndfas = int(mo.group(1))
assert ndfas == len(self.dfas)
lineno, line = lineno + 1, next(f)
assert line == "\tdfas,\n", (lineno, line)
lineno, line = lineno + 1, next(f)
mo = re.match(r"\s+{(\d+), labels},$", line)
assert mo, (lineno, line)
nlabels = int(mo.group(1))
assert nlabels == len(self.labels), (lineno, line)
lineno, line = lineno + 1, next(f)
mo = re.match(r"\s+(\d+)$", line)
assert mo, (lineno, line)
start = int(mo.group(1))
assert start in self.number2symbol, (lineno, line)
self.start = start
lineno, line = lineno + 1, next(f)
assert line == "};\n", (lineno, line)
try:
lineno, line = lineno + 1, next(f)
except StopIteration:
pass
else:
assert 0, (lineno, line)
def finish_off(self):
self.keywords = {} # map from keyword strings to arc labels
self.tokens = {} # map from numeric token values to arc labels
for ilabel, (type, value) in enumerate(self.labels):
if type == token.NAME and value is not None:
self.keywords[value] = ilabel
elif value is None:
self.tokens[type] = ilabel | --- +++ @@ -3,6 +3,30 @@
# mypy: ignore-errors
+"""Convert graminit.[ch] spit out by pgen to Python code.
+
+Pgen is the Python parser generator. It is useful to quickly create a
+parser from a grammar file in Python's grammar notation. But I don't
+want my parsers to be written in C (yet), so I'm translating the
+parsing tables to Python data structures and writing a Python parse
+engine.
+
+Note that the token numbers are constants determined by the standard
+Python tokenizer. The standard token module defines these numbers and
+their names (the names are not used much). The token numbers are
+hardcoded into the Python tokenizer and into pgen. A Python
+implementation of the Python tokenizer is also available, in the
+standard tokenize module.
+
+On the other hand, symbol numbers (representing the grammar's
+non-terminals) are assigned by pgen based on the actual grammar
+input.
+
+Note: this module is pretty much obsolete; the pgen module generates
+equivalent grammar tables directly from the Grammar.txt input file
+without having to invoke the Python pgen C program.
+
+"""
# Python imports
import re
@@ -12,13 +36,30 @@
class Converter(grammar.Grammar):
+ """Grammar subclass that reads classic pgen output files.
+
+ The run() method reads the tables as produced by the pgen parser
+ generator, typically contained in two C files, graminit.h and
+ graminit.c. The other methods are for internal use only.
+
+ See the base class for more documentation.
+
+ """
def run(self, graminit_h, graminit_c):
+ """Load the grammar tables from the text files written by pgen."""
self.parse_graminit_h(graminit_h)
self.parse_graminit_c(graminit_c)
self.finish_off()
def parse_graminit_h(self, filename):
+ """Parse the .h file written by pgen. (Internal)
+
+ This file is a sequence of #define statements defining the
+ nonterminals of the grammar as numbers. We build two tables
+ mapping the numbers to names and back.
+
+ """
try:
f = open(filename)
except OSError as err:
@@ -42,6 +83,33 @@ return True
def parse_graminit_c(self, filename):
+ """Parse the .c file written by pgen. (Internal)
+
+ The file looks as follows. The first two lines are always this:
+
+ #include "pgenheaders.h"
+ #include "grammar.h"
+
+ After that come four blocks:
+
+ 1) one or more state definitions
+ 2) a table defining dfas
+ 3) a table defining labels
+ 4) a struct defining the grammar
+
+ A state definition has the following form:
+ - one or more arc arrays, each of the form:
+ static arc arcs_<n>_<m>[<k>] = {
+ {<i>, <j>},
+ ...
+ };
+ - followed by a state array, of the form:
+ static state states_<s>[<t>] = {
+ {<k>, arcs_<n>_<m>},
+ ...
+ };
+
+ """
try:
f = open(filename)
except OSError as err:
@@ -178,10 +246,11 @@ assert 0, (lineno, line)
def finish_off(self):
+ """Create additional useful structures. (Internal)."""
self.keywords = {} # map from keyword strings to arc labels
self.tokens = {} # map from numeric token values to arc labels
for ilabel, (type, value) in enumerate(self.labels):
if type == token.NAME and value is not None:
self.keywords[value] = ilabel
elif value is None:
-                self.tokens[type] = ilabel
+                self.tokens[type] = ilabel
| https://raw.githubusercontent.com/psf/black/HEAD/src/blib2to3/pgen2/conv.py |
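The docstring added for parse_graminit_h above describes a file of #define lines mapping grammar nonterminals to numbers, from which two lookup tables are built. A minimal standalone sketch of that idea (illustrative only; the sample names and numbers are invented, and this is not the Converter API):
import re

# Hypothetical sample of graminit.h content mirroring the format described
# above; the symbol names and numbers are made up for illustration.
SAMPLE_H = """#define single_input 256
#define file_input 257
#define eval_input 258
"""

symbol2number: dict[str, int] = {}
number2symbol: dict[int, str] = {}
for line in SAMPLE_H.splitlines():
    mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
    if mo:  # ignore anything that is not a #define line
        name, number = mo.group(1), int(mo.group(2))
        symbol2number[name] = number
        number2symbol[number] = name

print(symbol2number)  # {'single_input': 256, 'file_input': 257, 'eval_input': 258}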
Generate docstrings with parameter types |
from collections.abc import Iterable, Sequence
from dataclasses import dataclass, field
from typing import Final, Union
from black.nodes import (
BRACKET,
CLOSING_BRACKETS,
COMPARATORS,
LOGIC_OPERATORS,
MATH_OPERATORS,
OPENING_BRACKETS,
UNPACKING_PARENTS,
VARARGS_PARENTS,
is_vararg,
syms,
)
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf, Node
# types
LN = Union[Leaf, Node]
Depth = int
LeafID = int
NodeType = int
Priority = int
COMPREHENSION_PRIORITY: Final = 20
COMMA_PRIORITY: Final = 18
TERNARY_PRIORITY: Final = 16
LOGIC_PRIORITY: Final = 14
STRING_PRIORITY: Final = 12
COMPARATOR_PRIORITY: Final = 10
MATH_PRIORITIES: Final = {
token.VBAR: 9,
token.CIRCUMFLEX: 8,
token.AMPER: 7,
token.LEFTSHIFT: 6,
token.RIGHTSHIFT: 6,
token.PLUS: 5,
token.MINUS: 5,
token.STAR: 4,
token.SLASH: 4,
token.DOUBLESLASH: 4,
token.PERCENT: 4,
token.AT: 4,
token.TILDE: 3,
token.DOUBLESTAR: 2,
}
DOT_PRIORITY: Final = 1
class BracketMatchError(Exception):
@dataclass
class BracketTracker:
depth: int = 0
bracket_match: dict[tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
delimiters: dict[LeafID, Priority] = field(default_factory=dict)
previous: Leaf | None = None
_for_loop_depths: list[int] = field(default_factory=list)
_lambda_argument_depths: list[int] = field(default_factory=list)
invisible: list[Leaf] = field(default_factory=list)
def mark(self, leaf: Leaf) -> None:
if leaf.type == token.COMMENT:
return
if (
self.depth == 0
and leaf.type in CLOSING_BRACKETS
and (self.depth, leaf.type) not in self.bracket_match
):
return
self.maybe_decrement_after_for_loop_variable(leaf)
self.maybe_decrement_after_lambda_arguments(leaf)
if leaf.type in CLOSING_BRACKETS:
self.depth -= 1
try:
opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
except KeyError as e:
raise BracketMatchError(
"Unable to match a closing bracket to the following opening"
f" bracket: {leaf}"
) from e
leaf.opening_bracket = opening_bracket
if not leaf.value:
self.invisible.append(leaf)
leaf.bracket_depth = self.depth
if self.depth == 0:
delim = is_split_before_delimiter(leaf, self.previous)
if delim and self.previous is not None:
self.delimiters[id(self.previous)] = delim
else:
delim = is_split_after_delimiter(leaf)
if delim:
self.delimiters[id(leaf)] = delim
if leaf.type in OPENING_BRACKETS:
self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
self.depth += 1
if not leaf.value:
self.invisible.append(leaf)
self.previous = leaf
self.maybe_increment_lambda_arguments(leaf)
self.maybe_increment_for_loop_variable(leaf)
def any_open_for_or_lambda(self) -> bool:
return bool(self._for_loop_depths or self._lambda_argument_depths)
def any_open_brackets(self) -> bool:
return bool(self.bracket_match)
def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
return max(v for k, v in self.delimiters.items() if k not in exclude)
def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
if not self.delimiters:
return 0
priority = priority or self.max_delimiter_priority()
return sum(1 for p in self.delimiters.values() if p == priority)
def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
if leaf.type == token.NAME and leaf.value == "for":
self.depth += 1
self._for_loop_depths.append(self.depth)
return True
return False
def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
if (
self._for_loop_depths
and self._for_loop_depths[-1] == self.depth
and leaf.type == token.NAME
and leaf.value == "in"
):
self.depth -= 1
self._for_loop_depths.pop()
return True
return False
def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
if leaf.type == token.NAME and leaf.value == "lambda":
self.depth += 1
self._lambda_argument_depths.append(self.depth)
return True
return False
def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
if (
self._lambda_argument_depths
and self._lambda_argument_depths[-1] == self.depth
and leaf.type == token.COLON
):
self.depth -= 1
self._lambda_argument_depths.pop()
return True
return False
def get_open_lsqb(self) -> Leaf | None:
return self.bracket_match.get((self.depth - 1, token.RSQB))
def is_split_after_delimiter(leaf: Leaf) -> Priority:
if leaf.type == token.COMMA:
return COMMA_PRIORITY
return 0
def is_split_before_delimiter(leaf: Leaf, previous: Leaf | None = None) -> Priority:
if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
# * and ** might also be MATH_OPERATORS but in this case they are not.
# Don't treat them as a delimiter.
return 0
if (
leaf.type == token.DOT
and leaf.parent
and leaf.parent.type not in {syms.import_from, syms.dotted_name}
and (previous is None or previous.type in CLOSING_BRACKETS)
):
return DOT_PRIORITY
if (
leaf.type in MATH_OPERATORS
and leaf.parent
and leaf.parent.type not in {syms.factor, syms.star_expr}
):
return MATH_PRIORITIES[leaf.type]
if leaf.type in COMPARATORS:
return COMPARATOR_PRIORITY
if (
leaf.type == token.STRING
and previous is not None
and previous.type == token.STRING
):
return STRING_PRIORITY
if leaf.type not in {token.NAME, token.ASYNC}:
return 0
if (
leaf.value == "for"
and leaf.parent
and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
or leaf.type == token.ASYNC
):
if (
not isinstance(leaf.prev_sibling, Leaf)
or leaf.prev_sibling.value != "async"
):
return COMPREHENSION_PRIORITY
if (
leaf.value == "if"
and leaf.parent
and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
):
return COMPREHENSION_PRIORITY
if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test:
return TERNARY_PRIORITY
if leaf.value == "is":
return COMPARATOR_PRIORITY
if (
leaf.value == "in"
and leaf.parent
and leaf.parent.type in {syms.comp_op, syms.comparison}
and not (
previous is not None
and previous.type == token.NAME
and previous.value == "not"
)
):
return COMPARATOR_PRIORITY
if (
leaf.value == "not"
and leaf.parent
and leaf.parent.type == syms.comp_op
and not (
previous is not None
and previous.type == token.NAME
and previous.value == "is"
)
):
return COMPARATOR_PRIORITY
if leaf.value in LOGIC_OPERATORS and leaf.parent:
return LOGIC_PRIORITY
return 0
def max_delimiter_priority_in_atom(node: LN) -> Priority:
if node.type != syms.atom:
return 0
first = node.children[0]
last = node.children[-1]
if not (first.type == token.LPAR and last.type == token.RPAR):
return 0
bt = BracketTracker()
for c in node.children[1:-1]:
if isinstance(c, Leaf):
bt.mark(c)
else:
for leaf in c.leaves():
bt.mark(leaf)
try:
return bt.max_delimiter_priority()
except ValueError:
return 0
def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> set[LeafID]:
try:
# Start with the first opening bracket and ignore closing brackets before.
start_index = next(
i for i, l in enumerate(leaves) if l.type in OPENING_BRACKETS
)
except StopIteration:
return set()
bracket_stack = []
ids = set()
for i in range(start_index, len(leaves)):
leaf = leaves[i]
if leaf.type in OPENING_BRACKETS:
bracket_stack.append((BRACKET[leaf.type], i))
if leaf.type in CLOSING_BRACKETS:
if bracket_stack and leaf.type == bracket_stack[-1][0]:
_, start = bracket_stack.pop()
for j in range(start, i + 1):
ids.add(id(leaves[j]))
else:
break
return ids | --- +++ @@ -1,3 +1,4 @@+"""Builds on top of nodes.py to track brackets."""
from collections.abc import Iterable, Sequence
from dataclasses import dataclass, field
@@ -52,10 +53,12 @@
class BracketMatchError(Exception):
+ """Raised when an opening bracket is unable to be matched to a closing bracket."""
@dataclass
class BracketTracker:
+ """Keeps track of brackets on a line."""
depth: int = 0
bracket_match: dict[tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
@@ -66,6 +69,23 @@ invisible: list[Leaf] = field(default_factory=list)
def mark(self, leaf: Leaf) -> None:
+ """Mark `leaf` with bracket-related metadata. Keep track of delimiters.
+
+ All leaves receive an int `bracket_depth` field that stores how deep
+ within brackets a given leaf is. 0 means there are no enclosing brackets
+ that started on this line.
+
+ If a leaf is itself a closing bracket and there is a matching opening
+ bracket earlier, it receives an `opening_bracket` field with which it forms a
+ pair. This is a one-directional link to avoid reference cycles. Closing
+ bracket without opening happens on lines continued from previous
+ breaks, e.g. `) -> "ReturnType":` as part of a funcdef where we place
+ the return type annotation on its own line of the previous closing RPAR.
+
+ If a leaf is a delimiter (a token on which Black can split the line if
+ needed) and it's on depth 0, its `id()` is stored in the tracker's
+ `delimiters` field.
+ """
if leaf.type == token.COMMENT:
return
@@ -109,15 +129,29 @@ self.maybe_increment_for_loop_variable(leaf)
def any_open_for_or_lambda(self) -> bool:
+ """Return True if there is an open for or lambda expression on the line.
+
+ See maybe_increment_for_loop_variable and maybe_increment_lambda_arguments
+ for details."""
return bool(self._for_loop_depths or self._lambda_argument_depths)
def any_open_brackets(self) -> bool:
+        """Return True if there is an as yet unmatched open bracket on the line."""
return bool(self.bracket_match)
def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
+ """Return the highest priority of a delimiter found on the line.
+
+ Values are consistent with what `is_split_*_delimiter()` return.
+ Raises ValueError on no delimiters.
+ """
return max(v for k, v in self.delimiters.items() if k not in exclude)
def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
+ """Return the number of delimiters with the given `priority`.
+
+ If no `priority` is passed, defaults to max priority on the line.
+ """
if not self.delimiters:
return 0
@@ -125,6 +159,11 @@ return sum(1 for p in self.delimiters.values() if p == priority)
def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
+ """In a for loop, or comprehension, the variables are often unpacks.
+
+ To avoid splitting on the comma in this situation, increase the depth of
+ tokens between `for` and `in`.
+ """
if leaf.type == token.NAME and leaf.value == "for":
self.depth += 1
self._for_loop_depths.append(self.depth)
@@ -133,6 +172,7 @@ return False
def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
+ """See `maybe_increment_for_loop_variable` above for explanation."""
if (
self._for_loop_depths
and self._for_loop_depths[-1] == self.depth
@@ -146,6 +186,11 @@ return False
def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
+ """In a lambda expression, there might be more than one argument.
+
+ To avoid splitting on the comma in this situation, increase the depth of
+ tokens between `lambda` and `:`.
+ """
if leaf.type == token.NAME and leaf.value == "lambda":
self.depth += 1
self._lambda_argument_depths.append(self.depth)
@@ -154,6 +199,7 @@ return False
def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
+ """See `maybe_increment_lambda_arguments` above for explanation."""
if (
self._lambda_argument_depths
and self._lambda_argument_depths[-1] == self.depth
@@ -166,10 +212,18 @@ return False
def get_open_lsqb(self) -> Leaf | None:
+ """Return the most recent opening square bracket (if any)."""
return self.bracket_match.get((self.depth - 1, token.RSQB))
def is_split_after_delimiter(leaf: Leaf) -> Priority:
+ """Return the priority of the `leaf` delimiter, given a line break after it.
+
+ The delimiter priorities returned here are from those delimiters that would
+ cause a line break after themselves.
+
+ Higher numbers are higher priority.
+ """
if leaf.type == token.COMMA:
return COMMA_PRIORITY
@@ -177,6 +231,13 @@
def is_split_before_delimiter(leaf: Leaf, previous: Leaf | None = None) -> Priority:
+ """Return the priority of the `leaf` delimiter, given a line break before it.
+
+ The delimiter priorities returned here are from those delimiters that would
+ cause a line break before themselves.
+
+ Higher numbers are higher priority.
+ """
if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
# * and ** might also be MATH_OPERATORS but in this case they are not.
# Don't treat them as a delimiter.
@@ -266,6 +327,11 @@
def max_delimiter_priority_in_atom(node: LN) -> Priority:
+ """Return maximum delimiter priority inside `node`.
+
+ This is specific to atoms with contents contained in a pair of parentheses.
+ If `node` isn't an atom or there are no enclosing parentheses, returns 0.
+ """
if node.type != syms.atom:
return 0
@@ -289,6 +355,11 @@
def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> set[LeafID]:
+ """Return leaves that are inside matching brackets.
+
+ The input `leaves` can have non-matching brackets at the head or tail parts.
+ Matching brackets are included.
+ """
try:
# Start with the first opening bracket and ignore closing brackets before.
start_index = next(
@@ -309,4 +380,4 @@ ids.add(id(leaves[j]))
else:
break
-    return ids
+    return ids
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/brackets.py |
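The mark() docstring above explains that every leaf receives a bracket_depth recording how deeply it is nested inside brackets on the line. As a rough, self-contained illustration of that bookkeeping (a toy over characters, not Black's BracketTracker over leaves):
# Toy depth tracker: assigns each character its enclosing-bracket depth,
# loosely analogous to the `bracket_depth` field described above.
PAIRS = {")": "(", "]": "[", "}": "{"}

def bracket_depths(text: str) -> list[int]:
    depths: list[int] = []
    stack: list[str] = []
    for ch in text:
        if ch in PAIRS.values():  # opening bracket: depth before entering it
            depths.append(len(stack))
            stack.append(ch)
        elif ch in PAIRS:  # closing bracket: pop its matching opener if present
            if stack and stack[-1] == PAIRS[ch]:
                stack.pop()
            depths.append(len(stack))
        else:
            depths.append(len(stack))
    return depths

print(bracket_depths("f(a, [b])"))  # [0, 0, 1, 1, 1, 1, 2, 1, 0]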
Create documentation strings for testing functions | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
from collections.abc import Callable, Iterator
from contextlib import contextmanager
from typing import TYPE_CHECKING, Union, cast
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pytree import NL, Context, Leaf, Node, RawNode, convert
# Local imports
from . import grammar, token, tokenize
if TYPE_CHECKING:
from blib2to3.pgen2.driver import TokenProxy
Results = dict[str, NL]
Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
DFA = list[list[tuple[int, int]]]
DFAS = tuple[DFA, dict[int, int]]
def lam_sub(grammar: Grammar, node: RawNode) -> NL:
assert node[3] is not None
return Node(type=node[0], children=node[3], context=node[2])
# A placeholder node, used when parser is backtracking.
DUMMY_NODE = (-1, None, None, None)
def stack_copy(
stack: list[tuple[DFAS, int, RawNode]],
) -> list[tuple[DFAS, int, RawNode]]:
return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]
class Recorder:
def __init__(self, parser: "Parser", ilabels: list[int], context: Context) -> None:
self.parser = parser
self._ilabels = ilabels
self.context = context # not really matter
self._dead_ilabels: set[int] = set()
self._start_point = self.parser.stack
self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels}
@property
def ilabels(self) -> set[int]:
return self._dead_ilabels.symmetric_difference(self._ilabels)
@contextmanager
def switch_to(self, ilabel: int) -> Iterator[None]:
with self.backtrack():
self.parser.stack = self._points[ilabel]
try:
yield
except ParseError:
self._dead_ilabels.add(ilabel)
finally:
self.parser.stack = self._start_point
@contextmanager
def backtrack(self) -> Iterator[None]:
is_backtracking = self.parser.is_backtracking
try:
self.parser.is_backtracking = True
yield
finally:
self.parser.is_backtracking = is_backtracking
def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
for ilabel in self.ilabels:
with self.switch_to(ilabel):
if raw:
self.parser._addtoken(ilabel, tok_type, tok_val, self.context)
else:
self.parser.addtoken(tok_type, tok_val, self.context)
def determine_route(
self, value: str | None = None, force: bool = False
) -> int | None:
alive_ilabels = self.ilabels
if len(alive_ilabels) == 0:
*_, most_successful_ilabel = self._dead_ilabels
raise ParseError("bad input", most_successful_ilabel, value, self.context)
ilabel, *rest = alive_ilabels
if force or not rest:
return ilabel
else:
return None
class ParseError(Exception):
def __init__(
self, msg: str, type: int | None, value: str | None, context: Context
) -> None:
Exception.__init__(
self, f"{msg}: type={type!r}, value={value!r}, context={context!r}"
)
self.msg = msg
self.type = type
self.value = value
self.context = context
class Parser:
def __init__(self, grammar: Grammar, convert: Convert | None = None) -> None:
self.grammar = grammar
# See note in docstring above. TL;DR this is ignored.
self.convert = convert or lam_sub
self.is_backtracking = False
self.last_token: int | None = None
def setup(self, proxy: "TokenProxy", start: int | None = None) -> None:
if start is None:
start = self.grammar.start
# Each stack entry is a tuple: (dfa, state, node).
# A node is a tuple: (type, value, context, children),
# where children is a list of nodes or None, and context may be None.
newnode: RawNode = (start, None, None, [])
stackentry = (self.grammar.dfas[start], 0, newnode)
self.stack: list[tuple[DFAS, int, RawNode]] = [stackentry]
self.rootnode: NL | None = None
self.used_names: set[str] = set()
self.proxy = proxy
self.last_token = None
def addtoken(self, type: int, value: str, context: Context) -> bool:
# Map from token to label
ilabels = self.classify(type, value, context)
assert len(ilabels) >= 1
# If we have only one state to advance, we'll directly
# take it as is.
if len(ilabels) == 1:
[ilabel] = ilabels
return self._addtoken(ilabel, type, value, context)
# If there are multiple states which we can advance (only
# happen under soft-keywords), then we will try all of them
# in parallel and as soon as one state can reach further than
# the rest, we'll choose that one. This is a pretty hacky
# and hopefully temporary algorithm.
#
# For a more detailed explanation, check out this post:
# https://tree.science/what-the-backtracking.html
with self.proxy.release() as proxy:
counter, force = 0, False
recorder = Recorder(self, ilabels, context)
recorder.add_token(type, value, raw=True)
next_token_value = value
while recorder.determine_route(next_token_value) is None:
if not proxy.can_advance(counter):
force = True
break
next_token_type, next_token_value, *_ = proxy.eat(counter)
if next_token_type in (tokenize.COMMENT, tokenize.NL):
counter += 1
continue
if next_token_type == tokenize.OP:
next_token_type = grammar.opmap[next_token_value]
recorder.add_token(next_token_type, next_token_value)
counter += 1
ilabel = cast(int, recorder.determine_route(next_token_value, force=force))
assert ilabel is not None
return self._addtoken(ilabel, type, value, context)
def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool:
# Loop until the token is shifted; may raise exceptions
while True:
dfa, state, node = self.stack[-1]
states, first = dfa
arcs = states[state]
# Look for a state with this label
for i, newstate in arcs:
t = self.grammar.labels[i][0]
if t >= 256:
# See if it's a symbol and if we're in its first set
itsdfa = self.grammar.dfas[t]
itsstates, itsfirst = itsdfa
if ilabel in itsfirst:
# Push a symbol
self.push(t, itsdfa, newstate, context)
break # To continue the outer while loop
elif ilabel == i:
# Look it up in the list of labels
# Shift a token; we're done with it
self.shift(type, value, newstate, context)
# Pop while we are in an accept-only state
state = newstate
while states[state] == [(0, state)]:
self.pop()
if not self.stack:
# Done parsing!
return True
dfa, state, node = self.stack[-1]
states, first = dfa
# Done with this token
self.last_token = type
return False
else:
if (0, state) in arcs:
# An accepting state, pop it and try something else
self.pop()
if not self.stack:
# Done parsing, but another token is input
raise ParseError("too much input", type, value, context)
else:
# No success finding a transition
raise ParseError("bad input", type, value, context)
def classify(self, type: int, value: str, context: Context) -> list[int]:
if type == token.NAME:
# Keep a listing of all used names
self.used_names.add(value)
# Check for reserved words
if value in self.grammar.keywords:
return [self.grammar.keywords[value]]
elif value in self.grammar.soft_keywords:
assert type in self.grammar.tokens
# Current soft keywords (match, case, type) can only appear at the
# beginning of a statement. So as a shortcut, don't try to treat them
# like keywords in any other context.
# ('_' is also a soft keyword in the real grammar, but for our grammar
# it's just an expression, so we don't need to treat it specially.)
if self.last_token not in (
None,
token.INDENT,
token.DEDENT,
token.NEWLINE,
token.SEMI,
token.COLON,
):
return [self.grammar.tokens[type]]
return [
self.grammar.tokens[type],
self.grammar.soft_keywords[value],
]
ilabel = self.grammar.tokens.get(type)
if ilabel is None:
raise ParseError("bad token", type, value, context)
return [ilabel]
def shift(self, type: int, value: str, newstate: int, context: Context) -> None:
if self.is_backtracking:
dfa, state, _ = self.stack[-1]
self.stack[-1] = (dfa, newstate, DUMMY_NODE)
else:
dfa, state, node = self.stack[-1]
rawnode: RawNode = (type, value, context, None)
newnode = convert(self.grammar, rawnode)
assert node[-1] is not None
node[-1].append(newnode)
self.stack[-1] = (dfa, newstate, node)
def push(self, type: int, newdfa: DFAS, newstate: int, context: Context) -> None:
if self.is_backtracking:
dfa, state, _ = self.stack[-1]
self.stack[-1] = (dfa, newstate, DUMMY_NODE)
self.stack.append((newdfa, 0, DUMMY_NODE))
else:
dfa, state, node = self.stack[-1]
newnode: RawNode = (type, None, context, [])
self.stack[-1] = (dfa, newstate, node)
self.stack.append((newdfa, 0, newnode))
def pop(self) -> None:
if self.is_backtracking:
self.stack.pop()
else:
popdfa, popstate, popnode = self.stack.pop()
newnode = convert(self.grammar, popnode)
if self.stack:
dfa, state, node = self.stack[-1]
assert node[-1] is not None
node[-1].append(newnode)
else:
self.rootnode = newnode
self.rootnode.used_names = self.used_names | --- +++ @@ -1,6 +1,14 @@ # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
+"""Parser engine for the grammar tables generated by pgen.
+
+The grammar table must be loaded first.
+
+See Parser/parser.c in the Python distribution for additional info on
+how this parsing engine works.
+
+"""
from collections.abc import Callable, Iterator
from contextlib import contextmanager
@@ -34,6 +42,7 @@ def stack_copy(
stack: list[tuple[DFAS, int, RawNode]],
) -> list[tuple[DFAS, int, RawNode]]:
+ """Nodeless stack copy."""
return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]
@@ -64,6 +73,15 @@
@contextmanager
def backtrack(self) -> Iterator[None]:
+ """
+ Use the node-level invariant ones for basic parsing operations (push/pop/shift).
+ These still will operate on the stack; but they won't create any new nodes, or
+ modify the contents of any other existing nodes.
+
+ This saves us a ton of time when we are backtracking, since we
+        want to restore to the initial state as quickly as possible, which
+        can only be done by making as few mutations as possible.
+ """
is_backtracking = self.parser.is_backtracking
try:
self.parser.is_backtracking = True
@@ -95,6 +113,7 @@
class ParseError(Exception):
+ """Exception to signal the parser is stuck."""
def __init__(
self, msg: str, type: int | None, value: str | None, context: Context
@@ -109,8 +128,69 @@
class Parser:
+ """Parser engine.
+
+ The proper usage sequence is:
+
+ p = Parser(grammar, [converter]) # create instance
+ p.setup([start]) # prepare for parsing
+ <for each input token>:
+ if p.addtoken(...): # parse a token; may raise ParseError
+ break
+ root = p.rootnode # root of abstract syntax tree
+
+ A Parser instance may be reused by calling setup() repeatedly.
+
+ A Parser instance contains state pertaining to the current token
+ sequence, and should not be used concurrently by different threads
+ to parse separate token sequences.
+
+ See driver.py for how to get input tokens by tokenizing a file or
+ string.
+
+ Parsing is complete when addtoken() returns True; the root of the
+ abstract syntax tree can then be retrieved from the rootnode
+ instance variable. When a syntax error occurs, addtoken() raises
+ the ParseError exception. There is no error recovery; the parser
+ cannot be used after a syntax error was reported (but it can be
+ reinitialized by calling setup()).
+
+ """
def __init__(self, grammar: Grammar, convert: Convert | None = None) -> None:
+ """Constructor.
+
+ The grammar argument is a grammar.Grammar instance; see the
+ grammar module for more information.
+
+ The parser is not ready yet for parsing; you must call the
+ setup() method to get it started.
+
+ The optional convert argument is a function mapping concrete
+ syntax tree nodes to abstract syntax tree nodes. If not
+ given, no conversion is done and the syntax tree produced is
+ the concrete syntax tree. If given, it must be a function of
+ two arguments, the first being the grammar (a grammar.Grammar
+ instance), and the second being the concrete syntax tree node
+ to be converted. The syntax tree is converted from the bottom
+ up.
+
+ **post-note: the convert argument is ignored since for Black's
+ usage, convert will always be blib2to3.pytree.convert. Allowing
+ this to be dynamic hurts mypyc's ability to use early binding.
+ These docs are left for historical and informational value.
+
+ A concrete syntax tree node is a (type, value, context, nodes)
+ tuple, where type is the node type (a token or symbol number),
+ value is None for symbols and a string for tokens, context is
+ None or an opaque value used for error reporting (typically a
+ (lineno, offset) pair), and nodes is a list of children for
+ symbols, and None for tokens.
+
+ An abstract syntax tree node may be anything; this is entirely
+ up to the converter function.
+
+ """
self.grammar = grammar
# See note in docstring above. TL;DR this is ignored.
self.convert = convert or lam_sub
@@ -118,6 +198,18 @@ self.last_token: int | None = None
def setup(self, proxy: "TokenProxy", start: int | None = None) -> None:
+ """Prepare for parsing.
+
+ This *must* be called before starting to parse.
+
+ The optional argument is an alternative start symbol; it
+ defaults to the grammar's start symbol.
+
+ You can use a Parser instance to parse any number of programs;
+ each time you call setup() the parser is reset to an initial
+ state determined by the (implicit or explicit) start symbol.
+
+ """
if start is None:
start = self.grammar.start
# Each stack entry is a tuple: (dfa, state, node).
@@ -132,6 +224,7 @@ self.last_token = None
def addtoken(self, type: int, value: str, context: Context) -> bool:
+ """Add a token; return True iff this is the end of the program."""
# Map from token to label
ilabels = self.classify(type, value, context)
assert len(ilabels) >= 1
@@ -225,6 +318,10 @@ raise ParseError("bad input", type, value, context)
def classify(self, type: int, value: str, context: Context) -> list[int]:
+ """Turn a token into a label. (Internal)
+
+ Depending on whether the value is a soft-keyword or not,
+ this function may return multiple labels to choose from."""
if type == token.NAME:
# Keep a listing of all used names
self.used_names.add(value)
@@ -258,6 +355,7 @@ return [ilabel]
def shift(self, type: int, value: str, newstate: int, context: Context) -> None:
+ """Shift a token. (Internal)"""
if self.is_backtracking:
dfa, state, _ = self.stack[-1]
self.stack[-1] = (dfa, newstate, DUMMY_NODE)
@@ -270,6 +368,7 @@ self.stack[-1] = (dfa, newstate, node)
def push(self, type: int, newdfa: DFAS, newstate: int, context: Context) -> None:
+ """Push a nonterminal. (Internal)"""
if self.is_backtracking:
dfa, state, _ = self.stack[-1]
self.stack[-1] = (dfa, newstate, DUMMY_NODE)
@@ -281,6 +380,7 @@ self.stack.append((newdfa, 0, newnode))
def pop(self) -> None:
+ """Pop a nonterminal. (Internal)"""
if self.is_backtracking:
self.stack.pop()
else:
@@ -292,4 +392,4 @@ node[-1].append(newnode)
else:
self.rootnode = newnode
-            self.rootnode.used_names = self.used_names
+            self.rootnode.used_names = self.used_names
| https://raw.githubusercontent.com/psf/black/HEAD/src/blib2to3/pgen2/parse.py |
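The Parser docstring above lays out the setup()/addtoken()/rootnode protocol; within Black this machinery is normally reached through a helper such as black.parsing.lib2to3_parse (imported elsewhere in this dataset). A hedged sketch, assuming lib2to3_parse accepts a source string and returns the parsed blib2to3 tree:
# Hedged sketch: the exact lib2to3_parse signature is an assumption here; it
# is used only to show that the Parser described above ultimately produces a
# pytree node for a piece of Python source.
from black.parsing import lib2to3_parse

tree = lib2to3_parse("x = 1\n")
print(type(tree).__name__)  # a blib2to3.pytree node type
print(str(tree), end="")    # the tree stringifies back to the original source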
Turn comments into proper docstrings | # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.
# mypy: allow-untyped-defs, allow-untyped-calls
import sys
from collections.abc import Iterator
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pgen2.token import (
ASYNC,
AWAIT,
COMMENT,
DEDENT,
ENDMARKER,
FSTRING_END,
FSTRING_MIDDLE,
FSTRING_START,
INDENT,
NAME,
NEWLINE,
NL,
NUMBER,
OP,
STRING,
TSTRING_END,
TSTRING_MIDDLE,
TSTRING_START,
tok_name,
)
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__credits__ = "GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro"
import pytokens
from pytokens import TokenType
from . import token as _token
__all__ = [x for x in dir(_token) if x[0] != "_"] + [
"tokenize",
"generate_tokens",
"untokenize",
]
del _token
Coord = tuple[int, int]
TokenInfo = tuple[int, str, Coord, Coord, str]
TOKEN_TYPE_MAP = {
TokenType.indent: INDENT,
TokenType.dedent: DEDENT,
TokenType.newline: NEWLINE,
TokenType.nl: NL,
TokenType.comment: COMMENT,
TokenType.semicolon: OP,
TokenType.lparen: OP,
TokenType.rparen: OP,
TokenType.lbracket: OP,
TokenType.rbracket: OP,
TokenType.lbrace: OP,
TokenType.rbrace: OP,
TokenType.colon: OP,
TokenType.op: OP,
TokenType.identifier: NAME,
TokenType.number: NUMBER,
TokenType.string: STRING,
TokenType.fstring_start: FSTRING_START,
TokenType.fstring_middle: FSTRING_MIDDLE,
TokenType.fstring_end: FSTRING_END,
TokenType.tstring_start: TSTRING_START,
TokenType.tstring_middle: TSTRING_MIDDLE,
TokenType.tstring_end: TSTRING_END,
TokenType.endmarker: ENDMARKER,
}
class TokenError(Exception): ...
def transform_whitespace(
token: pytokens.Token, source: str, prev_token: pytokens.Token | None
) -> pytokens.Token:
if (
token.type == TokenType.whitespace
and prev_token is not None
and prev_token.type not in (TokenType.nl, TokenType.newline)
):
token_str = source[token.start_index : token.end_index]
if token_str.startswith("\\\r\n"):
return pytokens.Token(
TokenType.nl,
token.start_index,
token.start_index + 3,
token.start_line,
token.start_col,
token.start_line,
token.start_col + 3,
)
elif token_str.startswith("\\\n") or token_str.startswith("\\\r"):
return pytokens.Token(
TokenType.nl,
token.start_index,
token.start_index + 2,
token.start_line,
token.start_col,
token.start_line,
token.start_col + 2,
)
return token
def tokenize(source: str, grammar: Grammar | None = None) -> Iterator[TokenInfo]:
lines = source.split("\n")
lines += [""] # For newline tokens in files that don't end in a newline
line, column = 1, 0
prev_token: pytokens.Token | None = None
try:
for token in pytokens.tokenize(source):
token = transform_whitespace(token, source, prev_token)
line, column = token.start_line, token.start_col
if token.type == TokenType.whitespace:
continue
token_str = source[token.start_index : token.end_index]
if token.type == TokenType.newline and token_str == "":
# Black doesn't yield empty newline tokens at the end of a file
# if there's no newline at the end of a file.
prev_token = token
continue
source_line = lines[token.start_line - 1]
if token.type == TokenType.identifier and token_str in ("async", "await"):
# Black uses `async` and `await` token types just for those two keywords
yield (
ASYNC if token_str == "async" else AWAIT,
token_str,
(token.start_line, token.start_col),
(token.end_line, token.end_col),
source_line,
)
elif token.type == TokenType.op and token_str == "...":
# Black doesn't have an ellipsis token yet, yield 3 DOTs instead
assert token.start_line == token.end_line
assert token.end_col == token.start_col + 3
token_str = "."
for start_col in range(token.start_col, token.start_col + 3):
end_col = start_col + 1
yield (
TOKEN_TYPE_MAP[token.type],
token_str,
(token.start_line, start_col),
(token.end_line, end_col),
source_line,
)
else:
token_type = TOKEN_TYPE_MAP.get(token.type)
if token_type is None:
raise ValueError(f"Unknown token type: {token.type!r}")
yield (
TOKEN_TYPE_MAP[token.type],
token_str,
(token.start_line, token.start_col),
(token.end_line, token.end_col),
source_line,
)
prev_token = token
except pytokens.UnexpectedEOF:
raise TokenError("Unexpected EOF in multi-line statement", (line, column))
except pytokens.TokenizeError as exc:
raise TokenError(f"Failed to parse: {type(exc).__name__}", (line, column))
def printtoken(
type: int, token: str, srow_col: Coord, erow_col: Coord, line: str
) -> None: # for testing
srow, scol = srow_col
erow, ecol = erow_col
print(f"{srow},{scol}-{erow},{ecol}:\t{tok_name[type]}\t{token!r}")
if __name__ == "__main__": # testing
if len(sys.argv) > 1:
token_iterator = tokenize(open(sys.argv[1]).read())
else:
token_iterator = tokenize(sys.stdin.read())
for tok in token_iterator:
printtoken(*tok) | --- +++ @@ -3,6 +3,29 @@
# mypy: allow-untyped-defs, allow-untyped-calls
+"""Tokenization help for Python programs.
+
+generate_tokens(readline) is a generator that breaks a stream of
+text into Python tokens. It accepts a readline-like method which is called
+repeatedly to get the next line of input (or "" for EOF). It generates
+5-tuples with these members:
+
+ the token type (see token.py)
+ the token (a string)
+ the starting (row, column) indices of the token (a 2-tuple of ints)
+ the ending (row, column) indices of the token (a 2-tuple of ints)
+ the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators
+
+Older entry points
+ tokenize_loop(readline, tokeneater)
+ tokenize(readline, tokeneater=printtoken)
+are the same, except instead of generating tokens, tokeneater is a callback
+function to which the 5 fields described above are passed as 5 arguments,
+each time a new token is found."""
import sys
from collections.abc import Iterator
@@ -82,6 +105,12 @@ def transform_whitespace(
token: pytokens.Token, source: str, prev_token: pytokens.Token | None
) -> pytokens.Token:
+ r"""
+ Black treats `\\\n` at the end of a line as a 'NL' token, while it
+ is ignored as whitespace in the regular Python parser.
+ But, only the first one. If there's a `\\\n` following it
+ (as in, a \ just by itself on a line), that is not made into NL.
+ """
if (
token.type == TokenType.whitespace
and prev_token is not None
@@ -194,4 +223,4 @@ token_iterator = tokenize(sys.stdin.read())
for tok in token_iterator:
-        printtoken(*tok)
+        printtoken(*tok)
| https://raw.githubusercontent.com/psf/black/HEAD/src/blib2to3/pgen2/tokenize.py |
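The module docstring added above describes 5-tuples of (type, string, start, end, line) that mirror the standard Python tokenizer. The standard library's tokenize module yields the same shape, so a quick standalone illustration of that format (using the stdlib, not blib2to3):
import io
import tokenize as std_tokenize

source = "x = 1  # answer\n"
for tok in std_tokenize.generate_tokens(io.StringIO(source).readline):
    # Each token carries: type, string, (srow, scol) start, (erow, ecol) end, line
    print(std_tokenize.tok_name[tok.type], repr(tok.string), tok.start, tok.end)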
Generate docstrings for each module | import io
import json
import platform
import re
import sys
import tokenize
import traceback
from collections.abc import (
Collection,
Generator,
MutableMapping,
Sequence,
)
from contextlib import nullcontext
from dataclasses import replace
from datetime import datetime, timezone
from enum import Enum
from json.decoder import JSONDecodeError
from pathlib import Path
from re import Pattern
from typing import Any
import click
from click.core import ParameterSource
from mypy_extensions import mypyc_attr
from pathspec import GitIgnoreSpec
from pathspec.patterns.gitignore import GitIgnorePatternError
from _black_version import version as __version__
from black.cache import Cache
from black.comments import normalize_fmt_off
from black.const import (
DEFAULT_EXCLUDES,
DEFAULT_INCLUDES,
DEFAULT_LINE_LENGTH,
STDIN_PLACEHOLDER,
)
from black.files import (
best_effort_relative_path,
find_project_root,
find_pyproject_toml,
find_user_pyproject_toml,
gen_python_files,
get_gitignore,
parse_pyproject_toml,
path_is_excluded,
resolves_outside_root_or_cannot_stat,
wrap_stream_for_windows,
)
from black.handle_ipynb_magics import (
PYTHON_CELL_MAGICS,
jupyter_dependencies_are_installed,
mask_cell,
put_trailing_semicolon_back,
remove_trailing_semicolon,
unmask_cell,
validate_cell,
)
from black.linegen import LN, LineGenerator, transform_line
from black.lines import EmptyLineTracker, LinesBlock
from black.mode import FUTURE_FLAG_TO_FEATURE, VERSION_TO_FEATURES, Feature
from black.mode import Mode as Mode # re-exported
from black.mode import Preview, TargetVersion, supports_feature
from black.nodes import STARS, is_number_token, is_simple_decorator_expression, syms
from black.output import color_diff, diff, dump_to_file, err, ipynb_diff, out
from black.parsing import ( # noqa F401
ASTSafetyError,
InvalidInput,
lib2to3_parse,
parse_ast,
stringify_ast,
)
from black.ranges import (
adjusted_lines,
convert_unchanged_lines,
parse_line_ranges,
sanitized_lines,
)
from black.report import Changed, NothingChanged, Report
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf, Node
COMPILED = Path(__file__).suffix in (".pyd", ".so")
# types
FileContent = str
Encoding = str
NewLine = str
class WriteBack(Enum):
NO = 0
YES = 1
DIFF = 2
CHECK = 3
COLOR_DIFF = 4
@classmethod
def from_configuration(
cls, *, check: bool, diff: bool, color: bool = False
) -> "WriteBack":
if check and not diff:
return cls.CHECK
if diff and color:
return cls.COLOR_DIFF
return cls.DIFF if diff else cls.YES
# Legacy name, left for integrations.
FileMode = Mode
def read_pyproject_toml(
ctx: click.Context, param: click.Parameter, value: str | None
) -> str | None:
if not value:
value = find_pyproject_toml(
ctx.params.get("src", ()), ctx.params.get("stdin_filename", None)
)
if value is None:
return None
try:
config = parse_pyproject_toml(value)
except (OSError, ValueError) as e:
raise click.FileError(
filename=value, hint=f"Error reading configuration file: {e}"
) from None
if not config:
return None
else:
spellcheck_pyproject_toml_keys(ctx, list(config), value)
# Sanitize the values to be Click friendly. For more information please see:
# https://github.com/psf/black/issues/1458
# https://github.com/pallets/click/issues/1567
config = {
k: str(v) if not isinstance(v, (list, dict)) else v
for k, v in config.items()
}
target_version = config.get("target_version")
if target_version is not None and not isinstance(target_version, list):
raise click.BadOptionUsage(
"target-version", "Config key target-version must be a list"
)
exclude = config.get("exclude")
if exclude is not None and not isinstance(exclude, str):
raise click.BadOptionUsage("exclude", "Config key exclude must be a string")
extend_exclude = config.get("extend_exclude")
if extend_exclude is not None and not isinstance(extend_exclude, str):
raise click.BadOptionUsage(
"extend-exclude", "Config key extend-exclude must be a string"
)
line_ranges = config.get("line_ranges")
if line_ranges is not None:
raise click.BadOptionUsage(
"line-ranges", "Cannot use line-ranges in the pyproject.toml file."
)
default_map: dict[str, Any] = {}
if ctx.default_map:
default_map.update(ctx.default_map)
default_map.update(config)
ctx.default_map = default_map
return value
def spellcheck_pyproject_toml_keys(
ctx: click.Context, config_keys: list[str], config_file_path: str
) -> None:
invalid_keys: list[str] = []
available_config_options = {param.name for param in ctx.command.params}
invalid_keys = [key for key in config_keys if key not in available_config_options]
if invalid_keys:
keys_str = ", ".join(map(repr, invalid_keys))
out(
f"Invalid config keys detected: {keys_str} (in {config_file_path})",
fg="red",
)
def target_version_option_callback(
c: click.Context, p: click.Option | click.Parameter, v: tuple[str, ...]
) -> list[TargetVersion]:
return [TargetVersion[val.upper()] for val in v]
def _target_versions_exceed_runtime(
target_versions: set[TargetVersion],
) -> bool:
if not target_versions:
return False
max_target_minor = max(tv.value for tv in target_versions)
return max_target_minor > sys.version_info[1]
def _version_mismatch_message(target_versions: set[TargetVersion]) -> str:
max_target = max(target_versions, key=lambda tv: tv.value)
runtime = f"{sys.version_info[0]}.{sys.version_info[1]}"
return (
f"Python {runtime} cannot parse code formatted for"
f" {max_target.pretty()}. To fix this: run Black with"
f" {max_target.pretty()}, set --target-version to"
f" py3{sys.version_info[1]}, or use --fast to skip the safety"
" check."
)
def enable_unstable_feature_callback(
c: click.Context, p: click.Option | click.Parameter, v: tuple[str, ...]
) -> list[Preview]:
return [Preview[val] for val in v]
def re_compile_maybe_verbose(regex: str) -> Pattern[str]:
if "\n" in regex:
regex = "(?x)" + regex
compiled: Pattern[str] = re.compile(regex)
return compiled
def validate_regex(
ctx: click.Context,
param: click.Parameter,
value: str | None,
) -> Pattern[str] | None:
try:
return re_compile_maybe_verbose(value) if value is not None else None
except re.error as e:
raise click.BadParameter(f"Not a valid regular expression: {e}") from None
@click.command(
context_settings={"help_option_names": ["-h", "--help"]},
# While Click does set this field automatically using the docstring, mypyc
# (annoyingly) strips 'em so we need to set it here too.
help="The uncompromising code formatter.",
)
@click.option("-c", "--code", type=str, help="Format the code passed in as a string.")
@click.option(
"-l",
"--line-length",
type=int,
default=DEFAULT_LINE_LENGTH,
help="How many characters per line to allow.",
show_default=True,
)
@click.option(
"-t",
"--target-version",
type=click.Choice([v.name.lower() for v in TargetVersion]),
callback=target_version_option_callback,
multiple=True,
help=(
"Python versions that should be supported by Black's output. You should"
" include all versions that your code supports. By default, Black will infer"
" target versions from the project metadata in pyproject.toml. If this does"
" not yield conclusive results, Black will use per-file auto-detection."
),
)
@click.option(
"--pyi",
is_flag=True,
help=(
"Format all input files like typing stubs regardless of file extension. This"
" is useful when piping source on standard input."
),
)
@click.option(
"--ipynb",
is_flag=True,
help=(
"Format all input files like Jupyter Notebooks regardless of file extension."
" This is useful when piping source on standard input."
),
)
@click.option(
"--python-cell-magics",
multiple=True,
help=(
"When processing Jupyter Notebooks, add the given magic to the list"
f" of known python-magics ({', '.join(sorted(PYTHON_CELL_MAGICS))})."
" Useful for formatting cells with custom python magics."
),
default=[],
)
@click.option(
"-x",
"--skip-source-first-line",
is_flag=True,
help="Skip the first line of the source code.",
)
@click.option(
"-S",
"--skip-string-normalization",
is_flag=True,
help="Don't normalize string quotes or prefixes.",
)
@click.option(
"-C",
"--skip-magic-trailing-comma",
is_flag=True,
help="Don't use trailing commas as a reason to split lines.",
)
@click.option(
"--preview",
is_flag=True,
help=(
"Enable potentially disruptive style changes that may be added to Black's main"
" functionality in the next major release."
),
)
@click.option(
"--unstable",
is_flag=True,
help=(
"Enable potentially disruptive style changes that have known bugs or are not"
" currently expected to make it into the stable style Black's next major"
" release. Implies --preview."
),
)
@click.option(
"--enable-unstable-feature",
type=click.Choice([v.name for v in Preview]),
callback=enable_unstable_feature_callback,
multiple=True,
help=(
"Enable specific features included in the `--unstable` style. Requires"
" `--preview`. No compatibility guarantees are provided on the behavior"
" or existence of any unstable features."
),
)
@click.option(
"--check",
is_flag=True,
help=(
"Don't write the files back, just return the status. Return code 0 means"
" nothing would change. Return code 1 means some files would be reformatted."
" Return code 123 means there was an internal error."
),
)
@click.option(
"--diff",
is_flag=True,
help=(
"Don't write the files back, just output a diff to indicate what changes"
" Black would've made. They are printed to stdout so capturing them is simple."
),
)
@click.option(
"--color/--no-color",
is_flag=True,
help="Show (or do not show) colored diff. Only applies when --diff is given.",
)
@click.option(
"--line-ranges",
multiple=True,
metavar="START-END",
help=(
"When specified, Black will try its best to only format these lines. This"
" option can be specified multiple times, and a union of the lines will be"
" formatted. Each range must be specified as two integers connected by a `-`:"
" `<START>-<END>`. The `<START>` and `<END>` integer indices are 1-based and"
" inclusive on both ends."
),
default=(),
)
@click.option(
"--fast/--safe",
is_flag=True,
help=(
"By default, Black performs an AST safety check after formatting your code."
" The --fast flag turns off this check and the --safe flag explicitly enables"
" it. [default: --safe]"
),
)
@click.option(
"--required-version",
type=str,
help=(
"Require a specific version of Black to be running. This is useful for"
" ensuring that all contributors to your project are using the same"
" version, because different versions of Black may format code a little"
" differently. This option can be set in a configuration file for consistent"
" results across environments."
),
)
@click.option(
"--exclude",
type=str,
callback=validate_regex,
help=(
"A regular expression that matches files and directories that should be"
" excluded on recursive searches. An empty value means no paths are excluded."
" Use forward slashes for directories on all platforms (Windows, too)."
" By default, Black also ignores all paths listed in .gitignore. Changing this"
f" value will override all default exclusions. [default: {DEFAULT_EXCLUDES}]"
),
show_default=False,
)
@click.option(
"--extend-exclude",
type=str,
callback=validate_regex,
help=(
"Like --exclude, but adds additional files and directories on top of the"
" default values instead of overriding them."
),
)
@click.option(
"--force-exclude",
type=str,
callback=validate_regex,
help=(
"Like --exclude, but files and directories matching this regex will be excluded"
" even when they are passed explicitly as arguments. This is useful when"
" invoking Black programmatically on changed files, such as in a pre-commit"
" hook or editor plugin."
),
)
@click.option(
"--stdin-filename",
type=str,
is_eager=True,
help=(
"The name of the file when passing it through stdin. Useful to make sure Black"
" will respect the --force-exclude option on some editors that rely on using"
" stdin."
),
)
@click.option(
"--include",
type=str,
default=DEFAULT_INCLUDES,
callback=validate_regex,
help=(
"A regular expression that matches files and directories that should be"
" included on recursive searches. An empty value means all files are included"
" regardless of the name. Use forward slashes for directories on all platforms"
" (Windows, too). Overrides all exclusions, including from .gitignore and"
" command line options."
),
show_default=True,
)
@click.option(
"-W",
"--workers",
type=click.IntRange(min=1),
default=None,
help=(
"When Black formats multiple files, it may use a process pool to speed up"
" formatting. This option controls the number of parallel workers. This can"
" also be specified via the BLACK_NUM_WORKERS environment variable. Defaults"
" to the number of CPUs in the system."
),
)
@click.option(
"-q",
"--quiet",
is_flag=True,
help=(
"Stop emitting all non-critical output. Error messages will still be emitted"
" (which can silenced by 2>/dev/null)."
),
)
@click.option(
"-v",
"--verbose",
is_flag=True,
help=(
"Emit messages about files that were not changed or were ignored due to"
" exclusion patterns. If Black is using a configuration file, a message"
" detailing which one it is using will be emitted."
),
)
@click.version_option(
version=__version__,
message=(
f"%(prog)s, %(version)s (compiled: {'yes' if COMPILED else 'no'})\n"
f"Python ({platform.python_implementation()}) {platform.python_version()}"
),
)
@click.argument(
"src",
nargs=-1,
type=click.Path(
exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True
),
is_eager=True,
metavar="SRC ...",
)
@click.option(
"--config",
type=click.Path(
exists=True,
file_okay=True,
dir_okay=False,
readable=True,
allow_dash=False,
path_type=str,
),
is_eager=True,
callback=read_pyproject_toml,
help="Read configuration options from a configuration file.",
)
@click.option(
"--no-cache",
is_flag=True,
help=(
"Skip reading and writing the cache, forcing Black to reformat all"
" included files."
),
)
@click.pass_context
def main(
ctx: click.Context,
code: str | None,
line_length: int,
target_version: list[TargetVersion],
check: bool,
diff: bool,
line_ranges: Sequence[str],
color: bool,
fast: bool,
pyi: bool,
ipynb: bool,
python_cell_magics: Sequence[str],
skip_source_first_line: bool,
skip_string_normalization: bool,
skip_magic_trailing_comma: bool,
preview: bool,
unstable: bool,
enable_unstable_feature: list[Preview],
quiet: bool,
verbose: bool,
required_version: str | None,
include: Pattern[str],
exclude: Pattern[str] | None,
extend_exclude: Pattern[str] | None,
force_exclude: Pattern[str] | None,
stdin_filename: str | None,
workers: int | None,
src: tuple[str, ...],
config: str | None,
no_cache: bool,
) -> None:
ctx.ensure_object(dict)
assert sys.version_info >= (3, 10), "Black requires Python 3.10+"
if sys.version_info[:3] == (3, 12, 5):
out(
"Python 3.12.5 has a memory safety issue that can cause Black's "
"AST safety checks to fail. "
"Please upgrade to Python 3.12.6 or downgrade to Python 3.12.4"
)
ctx.exit(1)
if src and code is not None:
out(
main.get_usage(ctx)
+ "\n\n'SRC' and 'code' cannot be passed simultaneously."
)
ctx.exit(1)
if not src and code is None:
out(main.get_usage(ctx) + "\n\nOne of 'SRC' or 'code' is required.")
ctx.exit(1)
# It doesn't do anything if --unstable is also passed, so just allow it.
if enable_unstable_feature and not (preview or unstable):
out(
main.get_usage(ctx)
+ "\n\n'--enable-unstable-feature' requires '--preview'."
)
ctx.exit(1)
root, method = (
find_project_root(src, stdin_filename) if code is None else (None, None)
)
ctx.obj["root"] = root
if verbose:
if root:
out(
f"Identified `{root}` as project root containing a {method}.",
fg="blue",
)
if config:
config_source = ctx.get_parameter_source("config")
user_level_config = str(find_user_pyproject_toml())
if config == user_level_config:
out(
"Using configuration from user-level config at "
f"'{user_level_config}'.",
fg="blue",
)
elif config_source in (
ParameterSource.DEFAULT,
ParameterSource.DEFAULT_MAP,
):
out("Using configuration from project root.", fg="blue")
else:
out(f"Using configuration in '{config}'.", fg="blue")
if ctx.default_map:
for param, value in ctx.default_map.items():
out(f"{param}: {value}")
error_msg = "Oh no! 💥 💔 💥"
if (
required_version
and required_version != __version__
and required_version != __version__.split(".")[0]
):
err(
f"{error_msg} The required version `{required_version}` does not match"
f" the running version `{__version__}`!"
)
ctx.exit(1)
if ipynb and pyi:
err("Cannot pass both `pyi` and `ipynb` flags!")
ctx.exit(1)
write_back = WriteBack.from_configuration(check=check, diff=diff, color=color)
if target_version:
versions = set(target_version)
else:
# We'll autodetect later.
versions = set()
mode = Mode(
target_versions=versions,
line_length=line_length,
is_pyi=pyi,
is_ipynb=ipynb,
skip_source_first_line=skip_source_first_line,
string_normalization=not skip_string_normalization,
magic_trailing_comma=not skip_magic_trailing_comma,
preview=preview,
unstable=unstable,
python_cell_magics=set(python_cell_magics),
enabled_features=set(enable_unstable_feature),
)
if not fast and _target_versions_exceed_runtime(versions):
err(
f"Warning: {_version_mismatch_message(versions)} Black's safety"
" check verifies equivalence by parsing the AST, which fails"
" when the running Python is older than the target version.",
fg="yellow",
)
lines: list[tuple[int, int]] = []
if line_ranges:
if ipynb:
err("Cannot use --line-ranges with ipynb files.")
ctx.exit(1)
try:
lines = parse_line_ranges(line_ranges)
except ValueError as e:
err(str(e))
ctx.exit(1)
if code is not None:
# Run in quiet mode by default with -c; the extra output isn't useful.
# You can still pass -v to get verbose output.
quiet = True
report = Report(check=check, diff=diff, quiet=quiet, verbose=verbose)
if code is not None:
reformat_code(
content=code,
fast=fast,
write_back=write_back,
mode=mode,
report=report,
lines=lines,
)
else:
assert root is not None # root is only None if code is not None
try:
sources = get_sources(
root=root,
src=src,
quiet=quiet,
verbose=verbose,
include=include,
exclude=exclude,
extend_exclude=extend_exclude,
force_exclude=force_exclude,
report=report,
stdin_filename=stdin_filename,
)
except GitIgnorePatternError:
ctx.exit(1)
if not sources:
if verbose or not quiet:
out("No Python files are present to be formatted. Nothing to do 😴")
if "-" in src:
sys.stdout.write(sys.stdin.read())
ctx.exit(0)
if len(sources) == 1:
reformat_one(
src=sources.pop(),
fast=fast,
write_back=write_back,
mode=mode,
report=report,
lines=lines,
no_cache=no_cache,
)
else:
from black.concurrency import reformat_many
if lines:
err("Cannot use --line-ranges to format multiple files.")
ctx.exit(1)
reformat_many(
sources=sources,
fast=fast,
write_back=write_back,
mode=mode,
report=report,
workers=workers,
no_cache=no_cache,
)
if verbose or not quiet:
if code is None and (verbose or report.change_count or report.failure_count):
out()
out(error_msg if report.return_code else "All done! ✨ 🍰 ✨")
if code is None:
click.echo(str(report), err=True)
ctx.exit(report.return_code)
def get_sources(
*,
root: Path,
src: tuple[str, ...],
quiet: bool,
verbose: bool,
include: Pattern[str],
exclude: Pattern[str] | None,
extend_exclude: Pattern[str] | None,
force_exclude: Pattern[str] | None,
report: "Report",
stdin_filename: str | None,
) -> set[Path]:
sources: set[Path] = set()
assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
using_default_exclude = exclude is None
exclude = re_compile_maybe_verbose(DEFAULT_EXCLUDES) if exclude is None else exclude
gitignore: dict[Path, GitIgnoreSpec] | None = None
root_gitignore = get_gitignore(root)
for s in src:
if s == "-" and stdin_filename:
path = Path(stdin_filename)
if path_is_excluded(stdin_filename, force_exclude):
report.path_ignored(
path,
"--stdin-filename matches the --force-exclude regular expression",
)
continue
is_stdin = True
else:
path = Path(s)
is_stdin = False
# Compare the logic here to the logic in `gen_python_files`.
if is_stdin or path.is_file():
if resolves_outside_root_or_cannot_stat(path, root, report):
if verbose:
out(f'Skipping invalid source: "{path}"', fg="red")
continue
root_relative_path = best_effort_relative_path(path, root).as_posix()
root_relative_path = "/" + root_relative_path
# Hard-exclude any files that matches the `--force-exclude` regex.
if path_is_excluded(root_relative_path, force_exclude):
report.path_ignored(
path, "matches the --force-exclude regular expression"
)
continue
if is_stdin:
path = Path(f"{STDIN_PLACEHOLDER}{path}")
if path.suffix == ".ipynb" and not jupyter_dependencies_are_installed(
warn=verbose or not quiet
):
continue
if verbose:
out(f'Found input source: "{path}"', fg="blue")
sources.add(path)
elif path.is_dir():
path = root / (path.resolve().relative_to(root))
if verbose:
out(f'Found input source directory: "{path}"', fg="blue")
if using_default_exclude:
gitignore = {
root: root_gitignore,
path: get_gitignore(path),
}
sources.update(
gen_python_files(
path.iterdir(),
root,
include,
exclude,
extend_exclude,
force_exclude,
report,
gitignore,
verbose=verbose,
quiet=quiet,
)
)
elif s == "-":
if verbose:
out("Found input source stdin", fg="blue")
sources.add(path)
else:
err(f"invalid path: {s}")
return sources
def reformat_code(
content: str,
fast: bool,
write_back: WriteBack,
mode: Mode,
report: Report,
*,
lines: Collection[tuple[int, int]] = (),
) -> None:
path = Path("<string>")
try:
changed = Changed.NO
if format_stdin_to_stdout(
content=content, fast=fast, write_back=write_back, mode=mode, lines=lines
):
changed = Changed.YES
report.done(path, changed)
except Exception as exc:
if report.verbose:
traceback.print_exc()
report.failed(path, str(exc))
# diff-shades depends on being able to monkeypatch this function to operate. I know it's
# not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
@mypyc_attr(patchable=True)
def reformat_one(
src: Path,
fast: bool,
write_back: WriteBack,
mode: Mode,
report: "Report",
*,
lines: Collection[tuple[int, int]] = (),
no_cache: bool = False,
) -> None:
try:
changed = Changed.NO
if str(src) == "-":
is_stdin = True
elif str(src).startswith(STDIN_PLACEHOLDER):
is_stdin = True
# Use the original name again in case we want to print something
# to the user
src = Path(str(src)[len(STDIN_PLACEHOLDER) :])
else:
is_stdin = False
if is_stdin:
if src.suffix == ".pyi":
mode = replace(mode, is_pyi=True)
elif src.suffix == ".ipynb":
mode = replace(mode, is_ipynb=True)
if format_stdin_to_stdout(
fast=fast, write_back=write_back, mode=mode, lines=lines
):
changed = Changed.YES
else:
cache = None if no_cache else Cache.read(mode)
if cache is not None and write_back not in (
WriteBack.DIFF,
WriteBack.COLOR_DIFF,
):
if not cache.is_changed(src):
changed = Changed.CACHED
if changed is not Changed.CACHED and format_file_in_place(
src, fast=fast, write_back=write_back, mode=mode, lines=lines
):
changed = Changed.YES
if cache is not None and (
(write_back is WriteBack.YES and changed is not Changed.CACHED)
or (write_back is WriteBack.CHECK and changed is Changed.NO)
):
cache.write([src])
report.done(src, changed)
except Exception as exc:
if report.verbose:
traceback.print_exc()
report.failed(src, str(exc))
def format_file_in_place(
src: Path,
fast: bool,
mode: Mode,
write_back: WriteBack = WriteBack.NO,
lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy
*,
lines: Collection[tuple[int, int]] = (),
) -> bool:
if src.suffix == ".pyi":
mode = replace(mode, is_pyi=True)
elif src.suffix == ".ipynb":
mode = replace(mode, is_ipynb=True)
then = datetime.fromtimestamp(src.stat().st_mtime, timezone.utc)
header = b""
with open(src, "rb") as buf:
if mode.skip_source_first_line:
header = buf.readline()
src_contents, encoding, newline = decode_bytes(buf.read(), mode)
try:
dst_contents = format_file_contents(
src_contents, fast=fast, mode=mode, lines=lines
)
except NothingChanged:
return False
except JSONDecodeError:
raise ValueError(
f"File '{src}' cannot be parsed as valid Jupyter notebook."
) from None
src_contents = header.decode(encoding) + src_contents
dst_contents = header.decode(encoding) + dst_contents
if write_back == WriteBack.YES:
with open(src, "w", encoding=encoding, newline=newline) as f:
f.write(dst_contents)
elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
now = datetime.now(timezone.utc)
src_name = f"{src}\t{then}"
dst_name = f"{src}\t{now}"
if mode.is_ipynb:
diff_contents = ipynb_diff(src_contents, dst_contents, src_name, dst_name)
else:
diff_contents = diff(src_contents, dst_contents, src_name, dst_name)
if write_back == WriteBack.COLOR_DIFF:
diff_contents = color_diff(diff_contents)
with lock or nullcontext():
f = io.TextIOWrapper(
sys.stdout.buffer,
encoding=encoding,
newline=newline,
write_through=True,
)
f = wrap_stream_for_windows(f)
f.write(diff_contents)
f.detach()
return True
def format_stdin_to_stdout(
fast: bool,
*,
content: str | None = None,
write_back: WriteBack = WriteBack.NO,
mode: Mode,
lines: Collection[tuple[int, int]] = (),
) -> bool:
then = datetime.now(timezone.utc)
if content is None:
src, encoding, newline = decode_bytes(sys.stdin.buffer.read(), mode)
else:
src, encoding, newline = content, "utf-8", "\n"
dst = src
try:
dst = format_file_contents(src, fast=fast, mode=mode, lines=lines)
return True
except NothingChanged:
return False
finally:
f = io.TextIOWrapper(
sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True
)
if write_back == WriteBack.YES:
# Make sure there's a newline after the content
if dst and dst[-1] != "\n" and dst[-1] != "\r":
dst += newline
f.write(dst)
elif write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
now = datetime.now(timezone.utc)
src_name = f"STDIN\t{then}"
dst_name = f"STDOUT\t{now}"
d = diff(src, dst, src_name, dst_name)
if write_back == WriteBack.COLOR_DIFF:
d = color_diff(d)
f = wrap_stream_for_windows(f)
f.write(d)
f.detach()
def check_stability_and_equivalence(
src_contents: str,
dst_contents: str,
*,
mode: Mode,
lines: Collection[tuple[int, int]] = (),
) -> None:
try:
assert_equivalent(src_contents, dst_contents)
except ASTSafetyError:
if _target_versions_exceed_runtime(mode.target_versions):
raise ASTSafetyError(
"failed to verify equivalence of the formatted output:"
f" {_version_mismatch_message(mode.target_versions)}"
) from None
raise
assert_stable(src_contents, dst_contents, mode=mode, lines=lines)
def format_file_contents(
src_contents: str,
*,
fast: bool,
mode: Mode,
lines: Collection[tuple[int, int]] = (),
) -> FileContent:
if mode.is_ipynb:
dst_contents = format_ipynb_string(src_contents, fast=fast, mode=mode)
else:
dst_contents = format_str(src_contents, mode=mode, lines=lines)
if src_contents == dst_contents:
raise NothingChanged
if not fast and not mode.is_ipynb:
# Jupyter notebooks will already have been checked above.
check_stability_and_equivalence(
src_contents, dst_contents, mode=mode, lines=lines
)
return dst_contents
def format_cell(src: str, *, fast: bool, mode: Mode) -> str:
validate_cell(src, mode)
src_without_trailing_semicolon, has_trailing_semicolon = remove_trailing_semicolon(
src
)
try:
masked_src, replacements = mask_cell(src_without_trailing_semicolon)
except SyntaxError:
raise NothingChanged from None
masked_dst = format_str(masked_src, mode=mode)
if not fast:
check_stability_and_equivalence(masked_src, masked_dst, mode=mode)
dst_without_trailing_semicolon = unmask_cell(masked_dst, replacements)
dst = put_trailing_semicolon_back(
dst_without_trailing_semicolon, has_trailing_semicolon
)
dst = dst.rstrip("\n")
if dst == src:
raise NothingChanged from None
return dst
def validate_metadata(nb: MutableMapping[str, Any]) -> None:
language = nb.get("metadata", {}).get("language_info", {}).get("name", None)
if language is not None and language != "python":
raise NothingChanged from None
def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileContent:
if not src_contents:
raise NothingChanged
trailing_newline = src_contents[-1] == "\n"
modified = False
nb = json.loads(src_contents)
validate_metadata(nb)
for cell in nb["cells"]:
if cell.get("cell_type", None) == "code":
try:
src = "".join(cell["source"])
dst = format_cell(src, fast=fast, mode=mode)
except NothingChanged:
pass
else:
cell["source"] = dst.splitlines(keepends=True)
modified = True
if modified:
dst_contents = json.dumps(nb, indent=1, ensure_ascii=False)
if trailing_newline:
dst_contents = dst_contents + "\n"
return dst_contents
else:
raise NothingChanged
def format_str(
src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
) -> str:
if lines:
lines = sanitized_lines(lines, src_contents)
if not lines:
return src_contents # Nothing to format
dst_contents = _format_str_once(src_contents, mode=mode, lines=lines)
# Forced second pass to work around optional trailing commas (becoming
# forced trailing commas on pass 2) interacting differently with optional
# parentheses. Admittedly ugly.
if src_contents != dst_contents:
if lines:
lines = adjusted_lines(lines, src_contents, dst_contents)
return _format_str_once(dst_contents, mode=mode, lines=lines)
return dst_contents
def _format_str_once(
src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
) -> str:
# Use the encoding overwrite since the src_contents may contain a magic
# encoding comment other than utf-8
normalized_contents, _, newline_type = decode_bytes(
src_contents.encode("utf-8"), mode, encoding_overwrite="utf-8"
)
src_node = lib2to3_parse(
normalized_contents.lstrip(), target_versions=mode.target_versions
)
dst_blocks: list[LinesBlock] = []
if mode.target_versions:
versions = mode.target_versions
else:
future_imports = get_future_imports(src_node)
versions = detect_target_versions(src_node, future_imports=future_imports)
line_generation_features = {
feature
for feature in {
Feature.PARENTHESIZED_CONTEXT_MANAGERS,
Feature.UNPARENTHESIZED_EXCEPT_TYPES,
Feature.T_STRINGS,
}
if supports_feature(versions, feature)
}
normalize_fmt_off(src_node, mode, lines)
if lines:
# This should be called after normalize_fmt_off.
convert_unchanged_lines(src_node, lines)
line_generator = LineGenerator(mode=mode, features=line_generation_features)
elt = EmptyLineTracker(mode=mode)
split_line_features = {
feature
for feature in {
Feature.TRAILING_COMMA_IN_CALL,
Feature.TRAILING_COMMA_IN_DEF,
}
if supports_feature(versions, feature)
}
block: LinesBlock | None = None
for current_line in line_generator.visit(src_node):
block = elt.maybe_empty_lines(current_line)
dst_blocks.append(block)
for line in transform_line(
current_line, mode=mode, features=split_line_features
):
block.content_lines.append(str(line))
if dst_blocks:
dst_blocks[-1].after = 0
dst_contents = []
for block in dst_blocks:
dst_contents.extend(block.all_lines())
if not dst_contents:
if "\n" in normalized_contents:
return newline_type
return "".join(dst_contents).replace("\n", newline_type)
def decode_bytes(
src: bytes, mode: Mode, *, encoding_overwrite: str | None = None
) -> tuple[FileContent, Encoding, NewLine]:
srcbuf = io.BytesIO(src)
# Still use detect_encoding even if an overwrite is set because otherwise lines
# might be different
encoding, lines = tokenize.detect_encoding(srcbuf.readline)
if encoding_overwrite is not None:
encoding = encoding_overwrite
if not lines:
return "", encoding, "\n"
if lines[0][-2:] == b"\r\n":
if b"\r" in lines[0][:-2]:
newline = "\r"
else:
newline = "\r\n"
elif lines[0][-1:] == b"\n":
if b"\r" in lines[0][:-1]:
newline = "\r"
else:
newline = "\n"
else:
if b"\r" in lines[0]:
newline = "\r"
else:
newline = "\n"
srcbuf.seek(0)
with io.TextIOWrapper(srcbuf, encoding) as tiow:
return tiow.read(), encoding, newline
def get_features_used(
node: Node, *, future_imports: set[str] | None = None
) -> set[Feature]:
features: set[Feature] = set()
if future_imports:
features |= {
FUTURE_FLAG_TO_FEATURE[future_import]
for future_import in future_imports
if future_import in FUTURE_FLAG_TO_FEATURE
}
for n in node.pre_order():
if n.type == token.FSTRING_START:
features.add(Feature.F_STRINGS)
elif n.type == token.TSTRING_START:
features.add(Feature.T_STRINGS)
elif (
n.type == token.RBRACE
and n.parent is not None
and any(child.type == token.EQUAL for child in n.parent.children)
):
features.add(Feature.DEBUG_F_STRINGS)
elif is_number_token(n):
if "_" in n.value:
features.add(Feature.NUMERIC_UNDERSCORES)
elif n.type == token.SLASH:
if n.parent and n.parent.type in {
syms.typedargslist,
syms.arglist,
syms.varargslist,
}:
features.add(Feature.POS_ONLY_ARGUMENTS)
elif n.type == token.COLONEQUAL:
features.add(Feature.ASSIGNMENT_EXPRESSIONS)
elif n.type == syms.decorator:
if len(n.children) > 1 and not is_simple_decorator_expression(
n.children[1]
):
features.add(Feature.RELAXED_DECORATORS)
elif (
n.type in {syms.typedargslist, syms.arglist}
and n.children
and n.children[-1].type == token.COMMA
):
if n.type == syms.typedargslist:
feature = Feature.TRAILING_COMMA_IN_DEF
else:
feature = Feature.TRAILING_COMMA_IN_CALL
for ch in n.children:
if ch.type in STARS:
features.add(feature)
if ch.type == syms.argument:
for argch in ch.children:
if argch.type in STARS:
features.add(feature)
elif (
n.type in {syms.return_stmt, syms.yield_expr}
and len(n.children) >= 2
and n.children[1].type == syms.testlist_star_expr
and any(child.type == syms.star_expr for child in n.children[1].children)
):
features.add(Feature.UNPACKING_ON_FLOW)
elif (
n.type == syms.annassign
and len(n.children) >= 4
and n.children[3].type == syms.testlist_star_expr
):
features.add(Feature.ANN_ASSIGN_EXTENDED_RHS)
elif (
n.type == syms.with_stmt
and len(n.children) > 2
and n.children[1].type == syms.atom
):
atom_children = n.children[1].children
if (
len(atom_children) == 3
and atom_children[0].type == token.LPAR
and _contains_asexpr(atom_children[1])
and atom_children[2].type == token.RPAR
):
features.add(Feature.PARENTHESIZED_CONTEXT_MANAGERS)
elif n.type == syms.match_stmt:
features.add(Feature.PATTERN_MATCHING)
elif n.type in {syms.subscriptlist, syms.trailer} and any(
child.type == syms.star_expr for child in n.children
):
features.add(Feature.VARIADIC_GENERICS)
elif (
n.type == syms.tname_star
and len(n.children) == 3
and n.children[2].type == syms.star_expr
):
features.add(Feature.VARIADIC_GENERICS)
elif n.type in (syms.type_stmt, syms.typeparams):
features.add(Feature.TYPE_PARAMS)
elif (
n.type in (syms.typevartuple, syms.paramspec, syms.typevar)
and n.children[-2].type == token.EQUAL
):
features.add(Feature.TYPE_PARAM_DEFAULTS)
elif (
n.type == syms.except_clause
and len(n.children) >= 2
and (
n.children[1].type == token.STAR or n.children[1].type == syms.testlist
)
):
is_star_except = n.children[1].type == token.STAR
if is_star_except:
features.add(Feature.EXCEPT_STAR)
# Presence of except* pushes the as clause back by one index
has_as_clause = (
len(n.children) >= is_star_except + 3
and n.children[is_star_except + 2].type == token.NAME
and n.children[is_star_except + 2].value == "as" # type: ignore
)
# If there's no 'as' clause and the except expression is a testlist.
if not has_as_clause and (
(is_star_except and n.children[2].type == syms.testlist)
or (not is_star_except and n.children[1].type == syms.testlist)
):
features.add(Feature.UNPARENTHESIZED_EXCEPT_TYPES)
return features
def _contains_asexpr(node: Node | Leaf) -> bool:
if node.type == syms.asexpr_test:
return True
elif node.type == syms.atom:
if (
len(node.children) == 3
and node.children[0].type == token.LPAR
and node.children[2].type == token.RPAR
):
return _contains_asexpr(node.children[1])
elif node.type == syms.testlist_gexp:
return any(_contains_asexpr(child) for child in node.children)
return False
def detect_target_versions(
node: Node, *, future_imports: set[str] | None = None
) -> set[TargetVersion]:
features = get_features_used(node, future_imports=future_imports)
return {
version for version in TargetVersion if features <= VERSION_TO_FEATURES[version]
}
def get_future_imports(node: Node) -> set[str]:
imports: set[str] = set()
def get_imports_from_children(children: list[LN]) -> Generator[str, None, None]:
for child in children:
if isinstance(child, Leaf):
if child.type == token.NAME:
yield child.value
elif child.type == syms.import_as_name:
orig_name = child.children[0]
assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports"
assert orig_name.type == token.NAME, "Invalid syntax parsing imports"
yield orig_name.value
elif child.type == syms.import_as_names:
yield from get_imports_from_children(child.children)
else:
raise AssertionError("Invalid syntax parsing imports")
for child in node.children:
if child.type != syms.simple_stmt:
break
first_child = child.children[0]
if isinstance(first_child, Leaf):
# Continue looking if we see a docstring; otherwise stop.
if (
len(child.children) == 2
and first_child.type == token.STRING
and child.children[1].type == token.NEWLINE
):
continue
break
elif first_child.type == syms.import_from:
module_name = first_child.children[1]
if not isinstance(module_name, Leaf) or module_name.value != "__future__":
break
imports |= set(get_imports_from_children(first_child.children[3:]))
else:
break
return imports
def _black_info() -> str:
return (
f"Black {__version__} on "
f"Python ({platform.python_implementation()}) {platform.python_version()}"
)
def assert_equivalent(src: str, dst: str) -> None:
try:
src_ast = parse_ast(src)
except Exception as exc:
raise ASTSafetyError(
"cannot use --safe with this file; failed to parse source file AST: "
f"{exc}\n"
"This could be caused by running Black with an older Python version "
"that does not support new syntax used in your source file."
) from exc
try:
dst_ast = parse_ast(dst)
except Exception as exc:
log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst)
raise ASTSafetyError(
f"INTERNAL ERROR: {_black_info()} produced invalid code: {exc}. "
"Please report a bug on https://github.com/psf/black/issues. "
f"This invalid output might be helpful: {log}"
) from None
src_ast_str = "\n".join(stringify_ast(src_ast))
dst_ast_str = "\n".join(stringify_ast(dst_ast))
if src_ast_str != dst_ast_str:
log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst"))
raise ASTSafetyError(
f"INTERNAL ERROR: {_black_info()} produced code that is not equivalent to"
" the source. Please report a bug on https://github.com/psf/black/issues."
f" This diff might be helpful: {log}"
) from None
def assert_stable(
src: str, dst: str, mode: Mode, *, lines: Collection[tuple[int, int]] = ()
) -> None:
if lines:
# Formatting specified lines requires `adjusted_lines` to map original lines
# to the formatted lines before re-formatting the previously formatted result.
# Due to the less-than-ideal diff algorithm, some edge cases produce incorrect new line
# ranges. Hence for now, we skip the stable check.
# See https://github.com/psf/black/issues/4033 for context.
return
# We shouldn't call format_str() here, because that formats the string
# twice and may hide a bug where we bounce back and forth between two
# versions.
newdst = _format_str_once(dst, mode=mode, lines=lines)
if dst != newdst:
log = dump_to_file(
str(mode),
diff(src, dst, "source", "first pass"),
diff(dst, newdst, "first pass", "second pass"),
)
raise AssertionError(
f"INTERNAL ERROR: {_black_info()} produced different code on the second"
" pass of the formatter. Please report a bug on"
f" https://github.com/psf/black/issues. This diff might be helpful: {log}"
) from None
def patched_main() -> None:
# PyInstaller patches multiprocessing to need freeze_support() even in non-Windows
# environments so just assume we always need to call it if frozen.
if getattr(sys, "frozen", False):
from multiprocessing import freeze_support
freeze_support()
main()
if __name__ == "__main__":
patched_main() | --- +++ @@ -115,6 +115,11 @@ def read_pyproject_toml(
ctx: click.Context, param: click.Parameter, value: str | None
) -> str | None:
+ """Inject Black configuration from "pyproject.toml" into defaults in `ctx`.
+
+ Returns the path to a successfully found and read configuration file, None
+ otherwise.
+ """
if not value:
value = find_pyproject_toml(
ctx.params.get("src", ()), ctx.params.get("stdin_filename", None)
@@ -189,6 +194,11 @@ def target_version_option_callback(
c: click.Context, p: click.Option | click.Parameter, v: tuple[str, ...]
) -> list[TargetVersion]:
+ """Compute the target versions from a --target-version flag.
+
+ This is its own function because mypy couldn't infer the type correctly
+ when it was a lambda, causing mypyc trouble.
+ """
return [TargetVersion[val.upper()] for val in v]
@@ -216,10 +226,15 @@ def enable_unstable_feature_callback(
c: click.Context, p: click.Option | click.Parameter, v: tuple[str, ...]
) -> list[Preview]:
+ """Compute the features from an --enable-unstable-feature flag."""
return [Preview[val] for val in v]
def re_compile_maybe_verbose(regex: str) -> Pattern[str]:
+ """Compile a regular expression string in `regex`.
+
+ If it contains newlines, use verbose mode.
+ """
if "\n" in regex:
regex = "(?x)" + regex
compiled: Pattern[str] = re.compile(regex)
@@ -551,6 +566,7 @@ config: str | None,
no_cache: bool,
) -> None:
+ """The uncompromising code formatter."""
ctx.ensure_object(dict)
assert sys.version_info >= (3, 10), "Black requires Python 3.10+"
@@ -756,6 +772,7 @@ report: "Report",
stdin_filename: str | None,
) -> set[Path]:
+ """Compute the set of files to be formatted."""
sources: set[Path] = set()
assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
@@ -849,6 +866,13 @@ *,
lines: Collection[tuple[int, int]] = (),
) -> None:
+ """
+ Reformat and print out `content` without spawning child processes.
+ Similar to `reformat_one`, but for string content.
+
+ `fast`, `write_back`, and `mode` options are passed to
+ :func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
+ """
path = Path("<string>")
try:
changed = Changed.NO
@@ -876,6 +900,11 @@ lines: Collection[tuple[int, int]] = (),
no_cache: bool = False,
) -> None:
+ """Reformat a single file under `src` without spawning child processes.
+
+ `fast`, `write_back`, and `mode` options are passed to
+ :func:`format_file_in_place` or :func:`format_stdin_to_stdout`.
+ """
try:
changed = Changed.NO
@@ -931,6 +960,12 @@ *,
lines: Collection[tuple[int, int]] = (),
) -> bool:
+ """Format file under `src` path. Return True if changed.
+
+ If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted
+ code to the file.
+ `mode` and `fast` options are passed to :func:`format_file_contents`.
+ """
if src.suffix == ".pyi":
mode = replace(mode, is_pyi=True)
elif src.suffix == ".ipynb":
@@ -992,6 +1027,14 @@ mode: Mode,
lines: Collection[tuple[int, int]] = (),
) -> bool:
+ """Format file on stdin. Return True if changed.
+
+ If content is None, it's read from sys.stdin.
+
+ If `write_back` is YES, write reformatted code back to stdout. If it is DIFF,
+ write a diff to stdout. The `mode` argument is passed to
+ :func:`format_file_contents`.
+ """
then = datetime.now(timezone.utc)
if content is None:
@@ -1035,6 +1078,12 @@ mode: Mode,
lines: Collection[tuple[int, int]] = (),
) -> None:
+ """Perform stability and equivalence checks.
+
+ Raise AssertionError if source and destination contents are not
+ equivalent, or if a second pass of the formatter would format the
+ content differently.
+ """
try:
assert_equivalent(src_contents, dst_contents)
except ASTSafetyError:
@@ -1054,6 +1103,12 @@ mode: Mode,
lines: Collection[tuple[int, int]] = (),
) -> FileContent:
+ """Reformat contents of a file and return new contents.
+
+ If `fast` is False, additionally confirm that the reformatted code is
+ valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it.
+ `mode` is passed to :func:`format_str`.
+ """
if mode.is_ipynb:
dst_contents = format_ipynb_string(src_contents, fast=fast, mode=mode)
else:
@@ -1070,6 +1125,21 @@
def format_cell(src: str, *, fast: bool, mode: Mode) -> str:
+ """Format code in given cell of Jupyter notebook.
+
+ General idea is:
+
+ - if cell has trailing semicolon, remove it;
+ - if cell has IPython magics, mask them;
+ - format cell;
+ - reinstate IPython magics;
+ - reinstate trailing semicolon (if originally present);
+ - strip trailing newlines.
+
+ Cells with syntax errors will not be processed, as they
+ could potentially be automagics or multi-line magics, which
+ are currently not supported.
+ """
validate_cell(src, mode)
src_without_trailing_semicolon, has_trailing_semicolon = remove_trailing_semicolon(
src
@@ -1092,12 +1162,23 @@
def validate_metadata(nb: MutableMapping[str, Any]) -> None:
+ """If notebook is marked as non-Python, don't format it.
+
+ All notebook metadata fields are optional, see
+ https://nbformat.readthedocs.io/en/latest/format_description.html. So
+ if a notebook has empty metadata, we will try to parse it anyway.
+ """
language = nb.get("metadata", {}).get("language_info", {}).get("name", None)
if language is not None and language != "python":
raise NothingChanged from None
def format_ipynb_string(src_contents: str, *, fast: bool, mode: Mode) -> FileContent:
+ """Format Jupyter notebook.
+
+ Operate cell-by-cell, only on code cells, only for Python notebooks.
+ If the ``.ipynb`` originally had a trailing newline, it'll be preserved.
+ """
if not src_contents:
raise NothingChanged
@@ -1127,6 +1208,35 @@ def format_str(
src_contents: str, *, mode: Mode, lines: Collection[tuple[int, int]] = ()
) -> str:
+ """Reformat a string and return new contents.
+
+ `mode` determines formatting options, such as how many characters per line are
+ allowed. Example:
+
+ >>> import black
+ >>> print(black.format_str("def f(arg:str='')->None:...", mode=black.Mode()))
+ def f(arg: str = "") -> None:
+ ...
+
+ A more complex example:
+
+ >>> print(
+ ... black.format_str(
+ ... "def f(arg:str='')->None: hey",
+ ... mode=black.Mode(
+ ... target_versions={black.TargetVersion.PY36},
+ ... line_length=10,
+ ... string_normalization=False,
+ ... is_pyi=False,
+ ... ),
+ ... ),
+ ... )
+ def f(
+ arg: str = '',
+ ) -> None:
+ hey
+
+ """
if lines:
lines = sanitized_lines(lines, src_contents)
if not lines:
@@ -1208,6 +1318,14 @@ def decode_bytes(
src: bytes, mode: Mode, *, encoding_overwrite: str | None = None
) -> tuple[FileContent, Encoding, NewLine]:
+ """Return a tuple of (decoded_contents, encoding, newline).
+
+ `newline` is either CRLF, LF, or CR, but `decoded_contents` is decoded with
+ universal newlines (i.e. only contains LF).
+
+ Use the keyword-only encoding_overwrite argument if the bytes are encoded
+ differently from their possible encoding magic comment.
+ """
srcbuf = io.BytesIO(src)
# Still use detect_encoding even if an overwrite is set because otherwise lines
@@ -1243,6 +1361,23 @@ def get_features_used(
node: Node, *, future_imports: set[str] | None = None
) -> set[Feature]:
+ """Return a set of (relatively) new Python features used in this file.
+
+ Currently looking for:
+ - f-strings;
+ - self-documenting expressions in f-strings (f"{x=}");
+ - underscores in numeric literals;
+ - trailing commas after * or ** in function signatures and calls;
+ - positional only arguments in function signatures and lambdas;
+ - assignment expression;
+ - relaxed decorator syntax;
+ - usage of __future__ flags (annotations);
+ - print / exec statements;
+ - parenthesized context managers;
+ - match statements;
+ - except* clause;
+ - variadic generics;
+ """
features: set[Feature] = set()
if future_imports:
features |= {
@@ -1386,6 +1521,7 @@
def _contains_asexpr(node: Node | Leaf) -> bool:
+ """Return True if `node` contains an as-pattern."""
if node.type == syms.asexpr_test:
return True
elif node.type == syms.atom:
@@ -1403,6 +1539,7 @@ def detect_target_versions(
node: Node, *, future_imports: set[str] | None = None
) -> set[TargetVersion]:
+ """Detect the version to target based on the nodes used."""
features = get_features_used(node, future_imports=future_imports)
return {
version for version in TargetVersion if features <= VERSION_TO_FEATURES[version]
@@ -1410,6 +1547,7 @@
def get_future_imports(node: Node) -> set[str]:
+ """Return a set of __future__ imports in the file."""
imports: set[str] = set()
def get_imports_from_children(children: list[LN]) -> Generator[str, None, None]:
@@ -1466,6 +1604,7 @@
def assert_equivalent(src: str, dst: str) -> None:
+ """Raise AssertionError if `src` and `dst` aren't equivalent."""
try:
src_ast = parse_ast(src)
except Exception as exc:
@@ -1500,6 +1639,7 @@ def assert_stable(
src: str, dst: str, mode: Mode, *, lines: Collection[tuple[int, int]] = ()
) -> None:
+ """Raise AssertionError if `dst` reformats differently the second time."""
if lines:
# Formatting specified lines requires `adjusted_lines` to map original lines
# to the formatted lines before re-formatting the previously formatted result.
@@ -1536,4 +1676,4 @@
if __name__ == "__main__":
- patched_main()+ patched_main()
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/__init__.py |
Add docstrings to make code maintainable |
import hashlib
import os
import pickle
import sys
import tempfile
from collections.abc import Iterable
from dataclasses import dataclass, field
from pathlib import Path
from typing import NamedTuple
from platformdirs import user_cache_dir
from _black_version import version as __version__
from black.mode import Mode
from black.output import err
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
class FileData(NamedTuple):
st_mtime: float
st_size: int
hash: str
def get_cache_dir() -> Path:
# NOTE: Function mostly exists as a clean way to test getting the cache directory.
default_cache_dir = user_cache_dir("black")
cache_dir = Path(os.environ.get("BLACK_CACHE_DIR", default_cache_dir))
cache_dir = cache_dir / __version__
return cache_dir
CACHE_DIR = get_cache_dir()
def get_cache_file(mode: Mode) -> Path:
return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
@dataclass
class Cache:
mode: Mode
cache_file: Path
file_data: dict[str, FileData] = field(default_factory=dict)
@classmethod
def read(cls, mode: Mode) -> Self:
cache_file = get_cache_file(mode)
try:
exists = cache_file.exists()
except OSError as e:
# Likely file too long; see #4172 and #4174
err(f"Unable to read cache file {cache_file} due to {e}")
return cls(mode, cache_file)
if not exists:
return cls(mode, cache_file)
with cache_file.open("rb") as fobj:
try:
data: dict[str, tuple[float, int, str]] = pickle.load(fobj)
file_data = {k: FileData(*v) for k, v in data.items()}
except (pickle.UnpicklingError, ValueError, IndexError):
return cls(mode, cache_file)
return cls(mode, cache_file, file_data)
@staticmethod
def hash_digest(path: Path) -> str:
data = path.read_bytes()
return hashlib.sha256(data).hexdigest()
@staticmethod
def get_file_data(path: Path) -> FileData:
stat = path.stat()
hash = Cache.hash_digest(path)
return FileData(stat.st_mtime, stat.st_size, hash)
def is_changed(self, source: Path) -> bool:
res_src = source.resolve()
old = self.file_data.get(str(res_src))
if old is None:
return True
st = res_src.stat()
if st.st_size != old.st_size:
return True
if st.st_mtime != old.st_mtime:
new_hash = Cache.hash_digest(res_src)
if new_hash != old.hash:
return True
return False
def filtered_cached(self, sources: Iterable[Path]) -> tuple[set[Path], set[Path]]:
changed: set[Path] = set()
done: set[Path] = set()
for src in sources:
if self.is_changed(src):
changed.add(src)
else:
done.add(src)
return changed, done
def write(self, sources: Iterable[Path]) -> None:
self.file_data.update(
**{str(src.resolve()): Cache.get_file_data(src) for src in sources}
)
try:
CACHE_DIR.mkdir(parents=True, exist_ok=True)
with tempfile.NamedTemporaryFile(
dir=str(self.cache_file.parent), delete=False
) as f:
# We store raw tuples in the cache because it's faster.
data: dict[str, tuple[float, int, str]] = {
k: (*v,) for k, v in self.file_data.items()
}
pickle.dump(data, f, protocol=4)
os.replace(f.name, self.cache_file)
except OSError:
pass | --- +++ @@ -1,3 +1,4 @@+"""Caching of formatted files with feature-based invalidation."""
import hashlib
import os
@@ -28,6 +29,15 @@
def get_cache_dir() -> Path:
+ """Get the cache directory used by black.
+
+ Users can customize this directory on all systems using `BLACK_CACHE_DIR`
+ environment variable. By default, the cache directory is the user cache directory
+ under the black application.
+
+ This result is immediately set to a constant `black.cache.CACHE_DIR` so as to avoid
+ repeated calls.
+ """
# NOTE: Function mostly exists as a clean way to test getting the cache directory.
default_cache_dir = user_cache_dir("black")
cache_dir = Path(os.environ.get("BLACK_CACHE_DIR", default_cache_dir))
@@ -50,6 +60,11 @@
@classmethod
def read(cls, mode: Mode) -> Self:
+ """Read the cache if it exists and is well-formed.
+
+ If it is not well-formed, the call to write later should
+ resolve the issue.
+ """
cache_file = get_cache_file(mode)
try:
exists = cache_file.exists()
@@ -71,18 +86,21 @@
@staticmethod
def hash_digest(path: Path) -> str:
+ """Return hash digest for path."""
data = path.read_bytes()
return hashlib.sha256(data).hexdigest()
@staticmethod
def get_file_data(path: Path) -> FileData:
+ """Return file data for path."""
stat = path.stat()
hash = Cache.hash_digest(path)
return FileData(stat.st_mtime, stat.st_size, hash)
def is_changed(self, source: Path) -> bool:
+ """Check if source has changed compared to cached version."""
res_src = source.resolve()
old = self.file_data.get(str(res_src))
if old is None:
@@ -98,6 +116,11 @@ return False
def filtered_cached(self, sources: Iterable[Path]) -> tuple[set[Path], set[Path]]:
+ """Split an iterable of paths in `sources` into two sets.
+
+ The first contains paths of files that were modified on disk or are not in the
+ cache. The other contains paths to non-modified files.
+ """
changed: set[Path] = set()
done: set[Path] = set()
for src in sources:
@@ -108,6 +131,7 @@ return changed, done
def write(self, sources: Iterable[Path]) -> None:
+ """Update the cache file data and write a new cache file."""
self.file_data.update(
**{str(src.resolve()): Cache.get_file_data(src) for src in sources}
)
@@ -123,4 +147,4 @@ pickle.dump(data, f, protocol=4)
os.replace(f.name, self.cache_file)
except OSError:
- pass+ pass
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/cache.py |
Add docstrings for utility scripts | #
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import re
import string
from importlib.metadata import version
from pathlib import Path
from sphinx.application import Sphinx
CURRENT_DIR = Path(__file__).parent
def make_pypi_svg(version: str) -> None:
template: Path = CURRENT_DIR / "_static" / "pypi_template.svg"
target: Path = CURRENT_DIR / "_static" / "pypi.svg"
with open(str(template), encoding="utf8") as f:
svg: str = string.Template(f.read()).substitute(version=version)
with open(str(target), "w", encoding="utf8") as f:
f.write(svg)
def replace_pr_numbers_with_links(content: str) -> str:
return re.sub(r"#(\d+)", r"[#\1](https://github.com/psf/black/pull/\1)", content)
def handle_include_read(
app: Sphinx,
relative_path: Path,
parent_docname: str,
content: list[str],
) -> None:
if parent_docname == "change_log":
content[0] = replace_pr_numbers_with_links(content[0])
def setup(app: Sphinx) -> None:
app.connect("include-read", handle_include_read)
# Necessary so Click doesn't hit an encode error when called by
# sphinxcontrib-programoutput on Windows.
os.putenv("pythonioencoding", "utf-8")
# -- Project information -----------------------------------------------------
project = "Black"
copyright = "2018-Present, Łukasz Langa and contributors to Black"
author = "Łukasz Langa and contributors to Black"
# Autopopulate version
# The version, including alpha/beta/rc tags, but not commit hash and datestamps
release = version("black").split("+")[0]
# The short X.Y version.
version = release
for sp in "abcfr":
version = version.split(sp)[0]
make_pypi_svg(release)
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"myst_parser",
"sphinxcontrib.programoutput",
"sphinx_copybutton",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = [".rst", ".md"]
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "en"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# We need headers to be linkable to, so ask MyST-Parser to autogenerate anchor IDs for
# headers up to and including level 3.
myst_heading_anchors = 3
# Prettier supports formatting some MyST syntax but not all, so let's disable the
# unsupported yet still enabled by default ones.
myst_disable_syntax = [
"colon_fence",
"myst_block_break",
"myst_line_comment",
"math_block",
]
# Optional MyST Syntaxes
myst_enable_extensions = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_logo = "_static/logo2-readme.png"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "blackdoc"
# -- Options for LaTeX output ------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(
master_doc,
"black.tex",
"Documentation for Black",
"Łukasz Langa and contributors to Black",
"manual",
)]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "black", "Documentation for Black", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [(
master_doc,
"Black",
"Documentation for Black",
author,
"Black",
"The uncompromising Python code formatter",
"Miscellaneous",
)]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
autodoc_member_order = "bysource"
# -- sphinx-copybutton configuration ----------------------------------------
copybutton_prompt_text = (
r">>> |\.\.\. |> |\$ |\# | In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
)
copybutton_prompt_is_regexp = True
copybutton_remove_prompts = True
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"<name>": ("https://docs.python.org/3/", None)} | --- +++ @@ -33,6 +33,7 @@
def replace_pr_numbers_with_links(content: str) -> str:
+ """Replaces all PR numbers with the corresponding GitHub link."""
return re.sub(r"#(\d+)", r"[#\1](https://github.com/psf/black/pull/\1)", content)
@@ -42,11 +43,13 @@ parent_docname: str,
content: list[str],
) -> None:
+ """Handler for the include-read sphinx event."""
if parent_docname == "change_log":
content[0] = replace_pr_numbers_with_links(content[0])
def setup(app: Sphinx) -> None:
+ """Sets up a minimal sphinx extension."""
app.connect("include-read", handle_include_read)
@@ -229,4 +232,4 @@ # -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {"<name>": ("https://docs.python.org/3/", None)}+intersphinx_mapping = {"<name>": ("https://docs.python.org/3/", None)}
| https://raw.githubusercontent.com/psf/black/HEAD/docs/conf.py |
Generate docstrings with parameter types |
from __future__ import annotations
import asyncio
import logging
import os
import signal
import sys
import traceback
from collections.abc import Iterable
from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
from multiprocessing import Manager
from pathlib import Path
from typing import Any
from mypy_extensions import mypyc_attr
from black import WriteBack, format_file_in_place
from black.cache import Cache
from black.mode import Mode
from black.output import err
from black.report import Changed, Report
def maybe_use_uvloop() -> asyncio.AbstractEventLoop:
try:
if sys.platform != "win32":
import uvloop
return uvloop.new_event_loop()
else:
import winloop
return winloop.new_event_loop()
except ImportError:
return asyncio.new_event_loop()
def cancel(tasks: Iterable[asyncio.Future[Any]]) -> None:
err("Aborted!")
for task in tasks:
task.cancel()
def shutdown(loop: asyncio.AbstractEventLoop) -> None:
try:
# This part is borrowed from asyncio/runners.py in Python 3.7b2.
to_cancel = [task for task in asyncio.all_tasks(loop) if not task.done()]
if not to_cancel:
return
for task in to_cancel:
task.cancel()
loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
finally:
# `concurrent.futures.Future` objects cannot be cancelled once they
# are already running. Some may still be running when `shutdown()` happens.
# Silence their logger's spew about the event loop being closed.
cf_logger = logging.getLogger("concurrent.futures")
cf_logger.setLevel(logging.CRITICAL)
loop.close()
# diff-shades depends on being able to monkeypatch this function to operate. I know it's
# not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
@mypyc_attr(patchable=True)
def reformat_many(
sources: set[Path],
fast: bool,
write_back: WriteBack,
mode: Mode,
report: Report,
workers: int | None,
no_cache: bool = False,
) -> None:
if workers is None:
workers = int(os.environ.get("BLACK_NUM_WORKERS", 0))
workers = workers or os.cpu_count() or 1
if sys.platform == "win32":
# Work around https://bugs.python.org/issue26903
workers = min(workers, 60)
if getattr(sys, "frozen", False):
# In frozen builds (e.g. PyInstaller), avoid spawning worker processes (i.e.
# avoid using ProcessPoolExecutor) to prevent shutdown errors when workers
# try to import modules after cleanup begins.
# See https://github.com/psf/black/issues/4823
workers = 1
executor: Executor | None = None
if workers > 1:
try:
executor = ProcessPoolExecutor(max_workers=workers)
except (ImportError, NotImplementedError, OSError):
# we arrive here if the underlying system does not support multi-processing
# like in AWS Lambda or Termux, in which case we gracefully fall back to
# a ThreadPoolExecutor with just a single worker (more workers would not do
# us any good due to the Global Interpreter Lock)
pass
if executor is None:
executor = ThreadPoolExecutor(max_workers=1)
loop = maybe_use_uvloop()
asyncio.set_event_loop(loop)
try:
loop.run_until_complete(
schedule_formatting(
sources=sources,
fast=fast,
write_back=write_back,
mode=mode,
report=report,
loop=loop,
executor=executor,
no_cache=no_cache,
)
)
finally:
try:
shutdown(loop)
finally:
asyncio.set_event_loop(None)
if executor is not None:
executor.shutdown()
async def schedule_formatting(
sources: set[Path],
fast: bool,
write_back: WriteBack,
mode: Mode,
report: Report,
loop: asyncio.AbstractEventLoop,
executor: Executor,
no_cache: bool = False,
) -> None:
cache = None if no_cache else Cache.read(mode)
if cache is not None and write_back not in (
WriteBack.DIFF,
WriteBack.COLOR_DIFF,
):
sources, cached = cache.filtered_cached(sources)
for src in sorted(cached):
report.done(src, Changed.CACHED)
if not sources:
return
cancelled = []
sources_to_cache = []
lock = None
manager = None
if write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
# For diff output, we need locks to ensure we don't interleave output
# from different processes.
manager = Manager()
lock = manager.Lock()
try:
tasks = {
asyncio.ensure_future(
loop.run_in_executor(
executor, format_file_in_place, src, fast, mode, write_back, lock
)
): src
for src in sorted(sources)
}
pending = tasks.keys()
try:
loop.add_signal_handler(signal.SIGINT, cancel, pending)
loop.add_signal_handler(signal.SIGTERM, cancel, pending)
except NotImplementedError:
# There are no good alternatives for these on Windows.
pass
while pending:
done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
for task in done:
src = tasks.pop(task)
if task.cancelled():
cancelled.append(task)
elif exc := task.exception():
if report.verbose:
traceback.print_exception(type(exc), exc, exc.__traceback__)
report.failed(src, str(exc))
else:
changed = Changed.YES if task.result() else Changed.NO
# If the file was written back or was successfully checked as
# well-formatted, store this information in the cache.
if write_back is WriteBack.YES or (
write_back is WriteBack.CHECK and changed is Changed.NO
):
sources_to_cache.append(src)
report.done(src, changed)
if cancelled:
await asyncio.gather(*cancelled, return_exceptions=True)
if sources_to_cache and not no_cache and cache is not None:
cache.write(sources_to_cache)
finally:
if manager is not None:
manager.shutdown() | --- +++ @@ -1,3 +1,8 @@+"""
+Formatting many files at once via multiprocessing. Contains entrypoint and utilities.
+
+NOTE: this module is only imported if we need to format several files at once.
+"""
from __future__ import annotations
@@ -23,6 +28,12 @@
def maybe_use_uvloop() -> asyncio.AbstractEventLoop:
+ """If our environment has uvloop or winloop installed we use it otherwise
+ a normal asyncio eventloop is called as fallback.
+
+ This is called only from command-line entry points to avoid
+ interfering with the parent process if Black is used as a library.
+ """
try:
if sys.platform != "win32":
import uvloop
@@ -37,12 +48,14 @@
def cancel(tasks: Iterable[asyncio.Future[Any]]) -> None:
+ """asyncio signal handler that cancels all `tasks` and reports to stderr."""
err("Aborted!")
for task in tasks:
task.cancel()
def shutdown(loop: asyncio.AbstractEventLoop) -> None:
+ """Cancel all pending tasks on `loop`, wait for them, and close the loop."""
try:
# This part is borrowed from asyncio/runners.py in Python 3.7b2.
to_cancel = [task for task in asyncio.all_tasks(loop) if not task.done()]
@@ -73,6 +86,7 @@ workers: int | None,
no_cache: bool = False,
) -> None:
+ """Reformat multiple files using a ProcessPoolExecutor."""
if workers is None:
workers = int(os.environ.get("BLACK_NUM_WORKERS", 0))
@@ -135,6 +149,13 @@ executor: Executor,
no_cache: bool = False,
) -> None:
+ """Run formatting of `sources` in parallel using the provided `executor`.
+
+ (Use ProcessPoolExecutors for actual parallelism.)
+
+ `write_back`, `fast`, and `mode` options are passed to
+ :func:`format_file_in_place`.
+ """
cache = None if no_cache else Cache.read(mode)
if cache is not None and write_back not in (
WriteBack.DIFF,
@@ -197,4 +218,4 @@ cache.write(sources_to_cache)
finally:
if manager is not None:
- manager.shutdown()+ manager.shutdown()
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/concurrency.py |
Annotate my code with docstrings | import re
from collections.abc import Collection, Iterator
from dataclasses import dataclass
from functools import lru_cache
from typing import Final, Union
from black.mode import Mode
from black.nodes import (
CLOSING_BRACKETS,
STANDALONE_COMMENT,
STATEMENT,
WHITESPACE,
container_of,
first_leaf_of,
is_type_comment_string,
make_simple_prefix,
preceding_leaf,
syms,
)
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf, Node
# types
LN = Union[Leaf, Node]
FMT_OFF: Final = {"# fmt: off", "# fmt:off", "# yapf: disable"}
FMT_SKIP: Final = {"# fmt: skip", "# fmt:skip"}
FMT_ON: Final = {"# fmt: on", "# fmt:on", "# yapf: enable"}
# Compound statements we care about for fmt: skip handling
# (excludes except_clause and case_block which aren't standalone compound statements)
_COMPOUND_STATEMENTS: Final = STATEMENT - {syms.except_clause, syms.case_block}
COMMENT_EXCEPTIONS = " !:#'"
_COMMENT_PREFIX = "# "
_COMMENT_LIST_SEPARATOR = ";"
@dataclass
class ProtoComment:
type: int # token.COMMENT or STANDALONE_COMMENT
value: str # content of the comment
newlines: int # how many newlines before the comment
consumed: int # how many characters of the original leaf's prefix did we consume
form_feed: bool # is there a form feed before the comment
leading_whitespace: str # leading whitespace before the comment, if any
def generate_comments(leaf: LN, mode: Mode) -> Iterator[Leaf]:
total_consumed = 0
for pc in list_comments(
leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER, mode=mode
):
total_consumed = pc.consumed
prefix = make_simple_prefix(pc.newlines, pc.form_feed)
yield Leaf(pc.type, pc.value, prefix=prefix)
normalize_trailing_prefix(leaf, total_consumed)
@lru_cache(maxsize=4096)
def list_comments(prefix: str, *, is_endmarker: bool, mode: Mode) -> list[ProtoComment]:
result: list[ProtoComment] = []
if not prefix or "#" not in prefix:
return result
consumed = 0
nlines = 0
ignored_lines = 0
form_feed = False
for index, full_line in enumerate(re.split("\r?\n|\r", prefix)):
consumed += len(full_line) + 1 # adding the length of the split '\n'
match = re.match(r"^(\s*)(\S.*|)$", full_line)
assert match
whitespace, line = match.groups()
if not line:
nlines += 1
if "\f" in full_line:
form_feed = True
if not line.startswith("#"):
# Escaped newlines outside of a comment are not really newlines at
# all. We treat a single-line comment following an escaped newline
# as a simple trailing comment.
if line.endswith("\\"):
ignored_lines += 1
continue
if index == ignored_lines and not is_endmarker:
comment_type = token.COMMENT # simple trailing comment
else:
comment_type = STANDALONE_COMMENT
comment = make_comment(line, mode=mode)
result.append(
ProtoComment(
type=comment_type,
value=comment,
newlines=nlines,
consumed=consumed,
form_feed=form_feed,
leading_whitespace=whitespace,
)
)
form_feed = False
nlines = 0
return result
def normalize_trailing_prefix(leaf: LN, total_consumed: int) -> None:
remainder = leaf.prefix[total_consumed:]
if "\\" not in remainder:
nl_count = remainder.count("\n")
form_feed = "\f" in remainder and remainder.endswith("\n")
leaf.prefix = make_simple_prefix(nl_count, form_feed)
return
leaf.prefix = ""
def make_comment(content: str, mode: Mode) -> str:
content = content.rstrip()
if not content:
return "#"
# Preserve comments with fmt directives exactly as-is
if content.startswith("#") and contains_fmt_directive(content):
return content
if content[0] == "#":
content = content[1:]
if (
content
and content[0] == "\N{NO-BREAK SPACE}"
and not is_type_comment_string("# " + content.lstrip(), mode=mode)
):
content = " " + content[1:] # Replace NBSP by a simple space
if (
content
and "\N{NO-BREAK SPACE}" not in content
and is_type_comment_string("#" + content, mode=mode)
):
type_part, value_part = content.split(":", 1)
content = type_part.strip() + ": " + value_part.strip()
if content and content[0] not in COMMENT_EXCEPTIONS:
content = " " + content
return "#" + content
def normalize_fmt_off(
node: Node, mode: Mode, lines: Collection[tuple[int, int]]
) -> None:
try_again = True
while try_again:
try_again = convert_one_fmt_off_pair(node, mode, lines)
def _should_process_fmt_comment(
comment: ProtoComment, leaf: Leaf
) -> tuple[bool, bool, bool]:
is_fmt_off = contains_fmt_directive(comment.value, FMT_OFF)
is_fmt_skip = contains_fmt_directive(comment.value, FMT_SKIP)
if not is_fmt_off and not is_fmt_skip:
return False, False, False
# Invalid use when `# fmt: off` is applied before a closing bracket
if is_fmt_off and leaf.type in CLOSING_BRACKETS:
return False, False, False
return True, is_fmt_off, is_fmt_skip
def _is_valid_standalone_fmt_comment(
comment: ProtoComment, leaf: Leaf, is_fmt_off: bool, is_fmt_skip: bool
) -> bool:
if comment.type == STANDALONE_COMMENT:
return True
prev = preceding_leaf(leaf)
if not prev:
return True
# Treat STANDALONE_COMMENT nodes as whitespace for this check
if is_fmt_off and prev.type not in WHITESPACE and prev.type != STANDALONE_COMMENT:
return False
if is_fmt_skip and prev.type in WHITESPACE:
return False
return True
def _handle_comment_only_fmt_block(
leaf: Leaf,
comment: ProtoComment,
previous_consumed: int,
mode: Mode,
) -> bool:
all_comments = list_comments(leaf.prefix, is_endmarker=False, mode=mode)
# Find the first fmt:off and its matching fmt:on
fmt_off_idx = None
fmt_on_idx = None
for idx, c in enumerate(all_comments):
if fmt_off_idx is None and contains_fmt_directive(c.value, FMT_OFF):
fmt_off_idx = idx
if (
fmt_off_idx is not None
and idx > fmt_off_idx
and contains_fmt_directive(c.value, FMT_ON)
):
fmt_on_idx = idx
break
# Only proceed if we found both directives
if fmt_on_idx is None or fmt_off_idx is None:
return False
comment = all_comments[fmt_off_idx]
fmt_on_comment = all_comments[fmt_on_idx]
original_prefix = leaf.prefix
# Build the hidden value
start_pos = comment.consumed
end_pos = fmt_on_comment.consumed
content_between_and_fmt_on = original_prefix[start_pos:end_pos]
hidden_value = comment.value + "\n" + content_between_and_fmt_on
if hidden_value.endswith("\n"):
hidden_value = hidden_value[:-1]
# Build the standalone comment prefix - preserve all content before fmt:off
# including any comments that precede it
if fmt_off_idx == 0:
# No comments before fmt:off, use previous_consumed
pre_fmt_off_consumed = previous_consumed
else:
# Use the consumed position of the last comment before fmt:off
# This preserves all comments and content before the fmt:off directive
pre_fmt_off_consumed = all_comments[fmt_off_idx - 1].consumed
standalone_comment_prefix = (
original_prefix[:pre_fmt_off_consumed] + "\n" * comment.newlines
)
fmt_off_prefix = original_prefix.split(comment.value)[0]
if "\n" in fmt_off_prefix:
fmt_off_prefix = fmt_off_prefix.split("\n")[-1]
standalone_comment_prefix += fmt_off_prefix
# Update leaf prefix
leaf.prefix = original_prefix[fmt_on_comment.consumed :]
# Insert the STANDALONE_COMMENT
parent = leaf.parent
assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (prefix only)"
leaf_idx = None
for idx, child in enumerate(parent.children):
if child is leaf:
leaf_idx = idx
break
assert leaf_idx is not None, "INTERNAL ERROR: fmt: on/off handling (leaf index)"
parent.insert_child(
leaf_idx,
Leaf(
STANDALONE_COMMENT,
hidden_value,
prefix=standalone_comment_prefix,
fmt_pass_converted_first_leaf=None,
),
)
return True
def convert_one_fmt_off_pair(
node: Node, mode: Mode, lines: Collection[tuple[int, int]]
) -> bool:
for leaf in node.leaves():
# Skip STANDALONE_COMMENT nodes that were created by fmt:off/on/skip processing
# to avoid reprocessing them in subsequent iterations
if leaf.type == STANDALONE_COMMENT and hasattr(
leaf, "fmt_pass_converted_first_leaf"
):
continue
previous_consumed = 0
for comment in list_comments(leaf.prefix, is_endmarker=False, mode=mode):
should_process, is_fmt_off, is_fmt_skip = _should_process_fmt_comment(
comment, leaf
)
if not should_process:
previous_consumed = comment.consumed
continue
if not _is_valid_standalone_fmt_comment(
comment, leaf, is_fmt_off, is_fmt_skip
):
previous_consumed = comment.consumed
continue
ignored_nodes = list(generate_ignored_nodes(leaf, comment, mode))
# Handle comment-only blocks
if not ignored_nodes and is_fmt_off:
if _handle_comment_only_fmt_block(
leaf, comment, previous_consumed, mode
):
return True
continue
# Need actual nodes to process
if not ignored_nodes:
continue
# Handle regular fmt blocks
_handle_regular_fmt_block(
ignored_nodes,
comment,
previous_consumed,
is_fmt_skip,
lines,
leaf,
)
return True
return False
def _handle_regular_fmt_block(
ignored_nodes: list[LN],
comment: ProtoComment,
previous_consumed: int,
is_fmt_skip: bool,
lines: Collection[tuple[int, int]],
leaf: Leaf,
) -> None:
first = ignored_nodes[0] # Can be a container node with the `leaf`.
parent = first.parent
prefix = first.prefix
if contains_fmt_directive(comment.value, FMT_OFF):
first.prefix = prefix[comment.consumed :]
if is_fmt_skip:
first.prefix = ""
standalone_comment_prefix = prefix
else:
standalone_comment_prefix = prefix[:previous_consumed] + "\n" * comment.newlines
# Ensure STANDALONE_COMMENT nodes have trailing newlines when stringified
# This prevents multiple fmt: skip comments from being concatenated on one line
parts = []
for node in ignored_nodes:
if isinstance(node, Leaf) and node.type == STANDALONE_COMMENT:
# Add newline after STANDALONE_COMMENT Leaf
node_str = str(node)
if not node_str.endswith("\n"):
node_str += "\n"
parts.append(node_str)
elif isinstance(node, Node):
# For nodes that might contain STANDALONE_COMMENT leaves,
# we need custom stringify
has_standalone = any(
leaf.type == STANDALONE_COMMENT for leaf in node.leaves()
)
if has_standalone:
# Stringify node with STANDALONE_COMMENT leaves having trailing newlines
def stringify_node(n: LN) -> str:
if isinstance(n, Leaf):
if n.type == STANDALONE_COMMENT:
result = n.prefix + n.value
if not result.endswith("\n"):
result += "\n"
return result
return str(n)
else:
# For nested nodes, recursively process children
return "".join(stringify_node(child) for child in n.children)
parts.append(stringify_node(node))
else:
parts.append(str(node))
else:
parts.append(str(node))
hidden_value = "".join(parts)
comment_lineno = leaf.lineno - comment.newlines
if contains_fmt_directive(comment.value, FMT_OFF):
fmt_off_prefix = ""
if len(lines) > 0 and not any(
line[0] <= comment_lineno <= line[1] for line in lines
):
# keeping indentation of comment by preserving original whitespaces.
fmt_off_prefix = prefix.split(comment.value)[0]
if "\n" in fmt_off_prefix:
fmt_off_prefix = fmt_off_prefix.split("\n")[-1]
standalone_comment_prefix += fmt_off_prefix
hidden_value = comment.value + "\n" + hidden_value
if is_fmt_skip:
hidden_value += comment.leading_whitespace + comment.value
if hidden_value.endswith("\n"):
# That happens when one of the `ignored_nodes` ended with a NEWLINE
# leaf (possibly followed by a DEDENT).
hidden_value = hidden_value[:-1]
first_idx: int | None = None
for ignored in ignored_nodes:
index = ignored.remove()
if first_idx is None:
first_idx = index
assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
parent.insert_child(
first_idx,
Leaf(
STANDALONE_COMMENT,
hidden_value,
prefix=standalone_comment_prefix,
fmt_pass_converted_first_leaf=first_leaf_of(first),
),
)
def generate_ignored_nodes(
leaf: Leaf, comment: ProtoComment, mode: Mode
) -> Iterator[LN]:
if contains_fmt_directive(comment.value, FMT_SKIP):
yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment, mode)
return
container: LN | None = container_of(leaf)
while container is not None and container.type != token.ENDMARKER:
if is_fmt_on(container, mode=mode):
return
# fix for fmt: on in children
if children_contains_fmt_on(container, mode=mode):
for index, child in enumerate(container.children):
if isinstance(child, Leaf) and is_fmt_on(child, mode=mode):
if child.type in CLOSING_BRACKETS:
# This means `# fmt: on` is placed at a different bracket level
# than `# fmt: off`. This is an invalid use, but as a courtesy,
# we include this closing bracket in the ignored nodes.
# The alternative is to fail the formatting.
yield child
return
if (
child.type == token.INDENT
and index < len(container.children) - 1
and children_contains_fmt_on(
container.children[index + 1], mode=mode
)
):
# This means `# fmt: on` is placed right after an indentation
# level, and we shouldn't swallow the previous INDENT token.
return
if children_contains_fmt_on(child, mode=mode):
return
yield child
else:
if container.type == token.DEDENT and container.next_sibling is None:
# This can happen when there is no matching `# fmt: on` comment at the
# same level as `# fmt: off`. We need to keep this DEDENT.
return
yield container
container = container.next_sibling
def _find_compound_statement_context(parent: Node) -> Node | None:
if parent.type != syms.simple_stmt:
return None
if not isinstance(parent.parent, Node):
return None
# Case 1: Expanded form after Black's initial formatting pass.
# The one-liner has been split across multiple lines:
# if True:
# print("a"); print("b") # fmt: skip
# Structure: compound_stmt -> suite -> simple_stmt
if (
parent.parent.type == syms.suite
and isinstance(parent.parent.parent, Node)
and parent.parent.parent.type in _COMPOUND_STATEMENTS
):
return parent.parent
# Case 2: Original one-line form from the input source.
# The statement is still on a single line:
# if True: print("a"); print("b") # fmt: skip
# Structure: compound_stmt -> simple_stmt
if parent.parent.type in _COMPOUND_STATEMENTS:
return parent
return None
def _should_keep_compound_statement_inline(
body_node: Node, simple_stmt_parent: Node
) -> bool:
# Check if there are semicolons in the body
for leaf in body_node.leaves():
if leaf.type == token.SEMI:
# Verify it's a single-line body (one simple_stmt)
if body_node.type == syms.suite:
# After formatting: check suite has one simple_stmt child
simple_stmts = [
child
for child in body_node.children
if child.type == syms.simple_stmt
]
return len(simple_stmts) == 1 and simple_stmts[0] is simple_stmt_parent
else:
# Original form: body_node IS the simple_stmt
return body_node is simple_stmt_parent
return False
def _get_compound_statement_header(
body_node: Node, simple_stmt_parent: Node
) -> list[LN]:
if not _should_keep_compound_statement_inline(body_node, simple_stmt_parent):
return []
# Get the compound statement (parent of body)
compound_stmt = body_node.parent
if compound_stmt is None or compound_stmt.type not in _COMPOUND_STATEMENTS:
return []
# Collect all header leaves before the body
header_leaves: list[LN] = []
for child in compound_stmt.children:
if child is body_node:
break
if isinstance(child, Leaf):
if child.type not in (token.NEWLINE, token.INDENT):
header_leaves.append(child)
else:
header_leaves.extend(child.leaves())
return header_leaves
def _generate_ignored_nodes_from_fmt_skip(
leaf: Leaf, comment: ProtoComment, mode: Mode
) -> Iterator[LN]:
prev_sibling = leaf.prev_sibling
parent = leaf.parent
ignored_nodes: list[LN] = []
# Need to properly format the leaf prefix to compare it to comment.value,
# which is also formatted
comments = list_comments(leaf.prefix, is_endmarker=False, mode=mode)
if not comments or comment.value != comments[0].value:
return
if not prev_sibling and parent:
prev_sibling = parent.prev_sibling
if prev_sibling is not None:
leaf.prefix = leaf.prefix[comment.consumed :]
# Generates the nodes to be ignored by `fmt: skip`.
# Nodes to ignore are the ones on the same line as the
# `# fmt: skip` comment, excluding the `# fmt: skip`
# node itself.
# Traversal process (starting at the `# fmt: skip` node):
# 1. Move to the `prev_sibling` of the current node.
# 2. If `prev_sibling` has children, go to its rightmost leaf.
# 3. If there's no `prev_sibling`, move up to the parent
# node and repeat.
# 4. Continue until:
# a. You encounter an `INDENT` or `NEWLINE` node (indicates
# start of the line).
# b. You reach the root node.
# Include all visited LEAVES in the ignored list, except INDENT
# or NEWLINE leaves.
current_node = prev_sibling
ignored_nodes = [current_node]
if current_node.prev_sibling is None and current_node.parent is not None:
current_node = current_node.parent
# Track seen nodes to detect cycles that can occur after tree modifications
seen_nodes = {id(current_node)}
while "\n" not in current_node.prefix and current_node.prev_sibling is not None:
leaf_nodes = list(current_node.prev_sibling.leaves())
next_node = leaf_nodes[-1] if leaf_nodes else current_node
# Detect infinite loop - if we've seen this node before, stop
# This can happen when STANDALONE_COMMENT nodes are inserted
# during processing
if id(next_node) in seen_nodes:
break
current_node = next_node
seen_nodes.add(id(current_node))
# Stop if we encounter a STANDALONE_COMMENT created by fmt processing
if (
isinstance(current_node, Leaf)
and current_node.type == STANDALONE_COMMENT
and hasattr(current_node, "fmt_pass_converted_first_leaf")
):
break
if (
current_node.type in CLOSING_BRACKETS
and current_node.parent
and current_node.parent.type == syms.atom
):
current_node = current_node.parent
if current_node.type in (token.NEWLINE, token.INDENT):
current_node.prefix = ""
break
if current_node.type == token.DEDENT:
break
# Special case for with expressions
# Without this, we can get stuck inside the asexpr_test's children's children
if (
current_node.parent
and current_node.parent.type == syms.asexpr_test
and current_node.parent.parent
and current_node.parent.parent.type == syms.with_stmt
):
current_node = current_node.parent
ignored_nodes.insert(0, current_node)
if current_node.prev_sibling is None and current_node.parent is not None:
current_node = current_node.parent
# Special handling for compound statements with semicolon-separated bodies
if isinstance(parent, Node):
body_node = _find_compound_statement_context(parent)
if body_node is not None:
header_nodes = _get_compound_statement_header(body_node, parent)
if header_nodes:
ignored_nodes = header_nodes + ignored_nodes
yield from ignored_nodes
elif (
parent is not None and parent.type == syms.suite and leaf.type == token.NEWLINE
):
# The `# fmt: skip` is on the colon line of the if/while/def/class/...
# statements. The ignored nodes should be previous siblings of the
# parent suite node.
leaf.prefix = ""
parent_sibling = parent.prev_sibling
while parent_sibling is not None and parent_sibling.type != syms.suite:
ignored_nodes.insert(0, parent_sibling)
parent_sibling = parent_sibling.prev_sibling
# Special case for `async_stmt` where the ASYNC token is on the
# grandparent node.
grandparent = parent.parent
if (
grandparent is not None
and grandparent.prev_sibling is not None
and grandparent.prev_sibling.type == token.ASYNC
):
ignored_nodes.insert(0, grandparent.prev_sibling)
yield from iter(ignored_nodes)
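# --- Illustrative sketch (not part of this module) ------------------------------
# A simplified, flat-token mimic of the `# fmt: skip` traversal documented above:
# starting from the comment, walk left and collect everything on the same physical
# line, stopping at a token that marks the start of the line. Real Black walks a
# lib2to3 tree instead of a flat list; the helper and token names are hypothetical.
def _collect_fmt_skip_targets(tokens: list[str]) -> list[str]:
    ignored: list[str] = []
    for tok in reversed(tokens):
        if tok in ("NEWLINE", "INDENT", "DEDENT"):
            break  # reached the start of the physical line
        ignored.insert(0, tok)
    return ignored

# Example: the statement `x = [1,  2]  # fmt: skip` flattens (roughly) to the list
# below, and every token on that line is collected for verbatim output.
# _collect_fmt_skip_targets(["NEWLINE", "x", "=", "[1,  2]"]) == ["x", "=", "[1,  2]"]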
def is_fmt_on(container: LN, mode: Mode) -> bool:
fmt_on = False
for comment in list_comments(container.prefix, is_endmarker=False, mode=mode):
if contains_fmt_directive(comment.value, FMT_ON):
fmt_on = True
elif contains_fmt_directive(comment.value, FMT_OFF):
fmt_on = False
return fmt_on
def children_contains_fmt_on(container: LN, mode: Mode) -> bool:
for child in container.children:
leaf = first_leaf_of(child)
if leaf is not None and is_fmt_on(leaf, mode=mode):
return True
return False
def contains_pragma_comment(comment_list: list[Leaf]) -> bool:
for comment in comment_list:
if comment.value.startswith(("# type:", "# noqa", "# pylint:")):
return True
return False
def contains_fmt_directive(
comment_line: str, directives: set[str] = FMT_OFF | FMT_ON | FMT_SKIP
) -> bool:
semantic_comment_blocks = [
comment_line,
*[
_COMMENT_PREFIX + comment.strip()
for comment in comment_line.split(_COMMENT_PREFIX)[1:]
],
*[
_COMMENT_PREFIX + comment.strip()
for comment in comment_line.strip(_COMMENT_PREFIX).split(
_COMMENT_LIST_SEPARATOR
)
],
]
return any(comment in directives for comment in semantic_comment_blocks) | --- +++ @@ -38,6 +38,15 @@
@dataclass
class ProtoComment:
+ """Describes a piece of syntax that is a comment.
+
+ It's not a :class:`blib2to3.pytree.Leaf` so that:
+
+ * it can be cached (`Leaf` objects should not be reused more than once as
+ they store their lineno, column, prefix, and parent information);
+ * `newlines` and `consumed` fields are kept separate from the `value`. This
+ simplifies handling of special marker comments like ``# fmt: off/on``.
+ """
type: int # token.COMMENT or STANDALONE_COMMENT
value: str # content of the comment
@@ -48,6 +57,24 @@
def generate_comments(leaf: LN, mode: Mode) -> Iterator[Leaf]:
+ """Clean the prefix of the `leaf` and generate comments from it, if any.
+
+ Comments in lib2to3 are shoved into the whitespace prefix. This happens
+ in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation
+ move because it does away with modifying the grammar to include all the
+ possible places in which comments can be placed.
+
+ The sad consequence for us though is that comments don't "belong" anywhere.
+ This is why this function generates simple parentless Leaf objects for
+ comments. We simply don't know what the correct parent should be.
+
+ No matter though, we can live without this. We really only need to
+ differentiate between inline and standalone comments. The latter don't
+ share the line with any code.
+
+ Inline comments are emitted as regular token.COMMENT leaves. Standalone
+ are emitted with a fake STANDALONE_COMMENT token identifier.
+ """
total_consumed = 0
for pc in list_comments(
leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER, mode=mode
@@ -60,6 +87,7 @@
@lru_cache(maxsize=4096)
def list_comments(prefix: str, *, is_endmarker: bool, mode: Mode) -> list[ProtoComment]:
+ """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
result: list[ProtoComment] = []
if not prefix or "#" not in prefix:
return result
@@ -106,6 +134,10 @@
def normalize_trailing_prefix(leaf: LN, total_consumed: int) -> None:
+ """Normalize the prefix that's left over after generating comments.
+
+ Note: don't use backslashes for formatting or you'll lose your voting rights.
+ """
remainder = leaf.prefix[total_consumed:]
if "\\" not in remainder:
nl_count = remainder.count("\n")
@@ -117,6 +149,16 @@
def make_comment(content: str, mode: Mode) -> str:
+ """Return a consistently formatted comment from the given `content` string.
+
+ All comments (except for "##", "#!", "#:", '#'") should have a single
+ space between the hash sign and the content.
+
+ If `content` didn't start with a hash sign, one is provided.
+
+ Comments containing fmt directives are preserved exactly as-is to respect
+ user intent (e.g., `#no space # fmt: skip` stays as-is).
+ """
content = content.rstrip()
if not content:
return "#"
@@ -149,6 +191,7 @@ def normalize_fmt_off(
node: Node, mode: Mode, lines: Collection[tuple[int, int]]
) -> None:
+ """Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
try_again = True
while try_again:
try_again = convert_one_fmt_off_pair(node, mode, lines)
@@ -157,6 +200,10 @@ def _should_process_fmt_comment(
comment: ProtoComment, leaf: Leaf
) -> tuple[bool, bool, bool]:
+ """Check if comment should be processed for fmt handling.
+
+ Returns (should_process, is_fmt_off, is_fmt_skip).
+ """
is_fmt_off = contains_fmt_directive(comment.value, FMT_OFF)
is_fmt_skip = contains_fmt_directive(comment.value, FMT_SKIP)
@@ -173,6 +220,11 @@ def _is_valid_standalone_fmt_comment(
comment: ProtoComment, leaf: Leaf, is_fmt_off: bool, is_fmt_skip: bool
) -> bool:
+ """Check if comment is a valid standalone fmt directive.
+
+ We only want standalone comments. If there's no previous leaf or if
+ the previous leaf is indentation, it's a standalone comment in disguise.
+ """
if comment.type == STANDALONE_COMMENT:
return True
@@ -195,6 +247,10 @@ previous_consumed: int,
mode: Mode,
) -> bool:
+ """Handle fmt:off/on blocks that contain only comments.
+
+ Returns True if a block was converted, False otherwise.
+ """
all_comments = list_comments(leaf.prefix, is_endmarker=False, mode=mode)
# Find the first fmt:off and its matching fmt:on
@@ -277,6 +333,10 @@ def convert_one_fmt_off_pair(
node: Node, mode: Mode, lines: Collection[tuple[int, int]]
) -> bool:
+ """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
+
+ Returns True if a pair was converted.
+ """
for leaf in node.leaves():
# Skip STANDALONE_COMMENT nodes that were created by fmt:off/on/skip processing
# to avoid reprocessing them in subsequent iterations
@@ -337,6 +397,7 @@ lines: Collection[tuple[int, int]],
leaf: Leaf,
) -> None:
+ """Handle fmt blocks with actual AST nodes."""
first = ignored_nodes[0] # Can be a container node with the `leaf`.
parent = first.parent
prefix = first.prefix
@@ -431,6 +492,11 @@ def generate_ignored_nodes(
leaf: Leaf, comment: ProtoComment, mode: Mode
) -> Iterator[LN]:
+ """Starting from the container of `leaf`, generate all leaves until `# fmt: on`.
+
+ If comment is skip, returns leaf only.
+ Stops at the end of the block.
+ """
if contains_fmt_directive(comment.value, FMT_SKIP):
yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment, mode)
return
@@ -473,6 +539,18 @@
def _find_compound_statement_context(parent: Node) -> Node | None:
+ """Return the body node of a compound statement if we should respect fmt: skip.
+
+ This handles one-line compound statements like:
+ if condition: body # fmt: skip
+
+ When Black expands such statements, they temporarily look like:
+ if condition:
+ body # fmt: skip
+
+ In both cases, we want to return the body node (either the simple_stmt directly
+ or the suite containing it).
+ """
if parent.type != syms.simple_stmt:
return None
@@ -504,6 +582,11 @@ def _should_keep_compound_statement_inline(
body_node: Node, simple_stmt_parent: Node
) -> bool:
+ """Check if a compound statement should be kept on one line.
+
+ Returns True only for compound statements with semicolon-separated bodies,
+ like: if True: print("a"); print("b") # fmt: skip
+ """
# Check if there are semicolons in the body
for leaf in body_node.leaves():
if leaf.type == token.SEMI:
@@ -525,6 +608,7 @@ def _get_compound_statement_header(
body_node: Node, simple_stmt_parent: Node
) -> list[LN]:
+ """Get header nodes for a compound statement that should be preserved inline."""
if not _should_keep_compound_statement_inline(body_node, simple_stmt_parent):
return []
@@ -549,6 +633,7 @@ def _generate_ignored_nodes_from_fmt_skip(
leaf: Leaf, comment: ProtoComment, mode: Mode
) -> Iterator[LN]:
+ """Generate all leaves that should be ignored by the `# fmt: skip` from `leaf`."""
prev_sibling = leaf.prev_sibling
parent = leaf.parent
ignored_nodes: list[LN] = []
@@ -674,6 +759,9 @@
def is_fmt_on(container: LN, mode: Mode) -> bool:
+ """Determine whether formatting is switched on within a container.
+ Determined by whether the last `# fmt:` comment is `on` or `off`.
+ """
fmt_on = False
for comment in list_comments(container.prefix, is_endmarker=False, mode=mode):
if contains_fmt_directive(comment.value, FMT_ON):
@@ -684,6 +772,7 @@
def children_contains_fmt_on(container: LN, mode: Mode) -> bool:
+ """Determine if children have formatting switched on."""
for child in container.children:
leaf = first_leaf_of(child)
if leaf is not None and is_fmt_on(leaf, mode=mode):
@@ -693,6 +782,12 @@
def contains_pragma_comment(comment_list: list[Leaf]) -> bool:
+ """
+ Returns:
+ True iff one of the comments in @comment_list is a pragma used by one
+ of the more common static analysis tools for python (e.g. mypy, flake8,
+ pylint).
+ """
for comment in comment_list:
if comment.value.startswith(("# type:", "# noqa", "# pylint:")):
return True
@@ -703,6 +798,18 @@ def contains_fmt_directive(
comment_line: str, directives: set[str] = FMT_OFF | FMT_ON | FMT_SKIP
) -> bool:
+ """
+ Checks if the given comment contains format directives, alone or paired with
+ other comments.
+
+ Defaults to checking all directives (skip, off, on, yapf), but can be
+ narrowed to specific ones.
+
+ Matching styles:
+ # foobar <-- single comment
+ # foobar # foobar # foobar <-- multiple comments
+ # foobar; foobar <-- list of comments (; separated)
+ """
semantic_comment_blocks = [
comment_line,
*[
@@ -717,4 +824,4 @@ ],
]
- return any(comment in directives for comment in semantic_comment_blocks)
+ return any(comment in directives for comment in semantic_comment_blocks)
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/comments.py |
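The docstring added above for `contains_fmt_directive` describes three matching styles: a lone directive, several "#"-joined comments, and a ";"-separated list. The snippet below is a minimal, self-contained sketch of that splitting idea; the directive values and the helper name are illustrative assumptions, not Black's actual constants.

FMT_DIRECTIVES = {"# fmt: off", "# fmt: on", "# fmt: skip"}  # assumed values for this sketch

def has_fmt_directive(comment_line: str) -> bool:
    # Consider the whole line, every "#"-joined chunk, and every ";"-separated chunk.
    candidates = [comment_line]
    candidates += ["# " + part.strip() for part in comment_line.split("#")[1:]]
    candidates += ["# " + part.strip() for part in comment_line.lstrip("# ").split(";")]
    return any(candidate in FMT_DIRECTIVES for candidate in candidates)

assert has_fmt_directive("# noqa # fmt: skip")        # "#"-joined comments
assert has_fmt_directive("# fmt: off; type: ignore")  # ";"-separated list
assert not has_fmt_directive("# fmt: offbeat")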
Write docstrings describing functionality |
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from click import style
from black.output import err, out
class Changed(Enum):
NO = 0
CACHED = 1
YES = 2
class NothingChanged(UserWarning):
@dataclass
class Report:
check: bool = False
diff: bool = False
quiet: bool = False
verbose: bool = False
change_count: int = 0
same_count: int = 0
failure_count: int = 0
def done(self, src: Path, changed: Changed) -> None:
if changed is Changed.YES:
reformatted = "would reformat" if self.check or self.diff else "reformatted"
if self.verbose or not self.quiet:
out(f"{reformatted} {src}")
self.change_count += 1
else:
if self.verbose:
if changed is Changed.NO:
msg = f"{src} already well formatted, good job."
else:
msg = f"{src} wasn't modified on disk since last run."
out(msg, bold=False)
self.same_count += 1
def failed(self, src: Path, message: str) -> None:
err(f"error: cannot format {src}: {message}")
self.failure_count += 1
def path_ignored(self, path: Path, message: str) -> None:
if self.verbose:
out(f"{path} ignored: {message}", bold=False)
@property
def return_code(self) -> int:
# According to http://tldp.org/LDP/abs/html/exitcodes.html starting with
# 126 we have special return codes reserved by the shell.
if self.failure_count:
return 123
elif self.change_count and self.check:
return 1
return 0
def __str__(self) -> str:
if self.check or self.diff:
reformatted = "would be reformatted"
unchanged = "would be left unchanged"
failed = "would fail to reformat"
else:
reformatted = "reformatted"
unchanged = "left unchanged"
failed = "failed to reformat"
report = []
if self.change_count:
s = "s" if self.change_count > 1 else ""
report.append(
style(f"{self.change_count} file{s} ", bold=True, fg="blue")
+ style(f"{reformatted}", bold=True)
)
if self.same_count:
s = "s" if self.same_count > 1 else ""
report.append(style(f"{self.same_count} file{s} ", fg="blue") + unchanged)
if self.failure_count:
s = "s" if self.failure_count > 1 else ""
report.append(style(f"{self.failure_count} file{s} {failed}", fg="red"))
return ", ".join(report) + "." | --- +++ @@ -1,3 +1,6 @@+"""
+Summarize Black runs to users.
+"""
from dataclasses import dataclass
from enum import Enum
@@ -15,10 +18,12 @@
class NothingChanged(UserWarning):
+ """Raised when reformatted code is the same as source."""
@dataclass
class Report:
+ """Provides a reformatting counter. Can be rendered with `str(report)`."""
check: bool = False
diff: bool = False
@@ -29,6 +34,7 @@ failure_count: int = 0
def done(self, src: Path, changed: Changed) -> None:
+ """Increment the counter for successful reformatting. Write out a message."""
if changed is Changed.YES:
reformatted = "would reformat" if self.check or self.diff else "reformatted"
if self.verbose or not self.quiet:
@@ -44,6 +50,7 @@ self.same_count += 1
def failed(self, src: Path, message: str) -> None:
+ """Increment the counter for failed reformatting. Write out a message."""
err(f"error: cannot format {src}: {message}")
self.failure_count += 1
@@ -53,6 +60,13 @@
@property
def return_code(self) -> int:
+ """Return the exit code that the app should use.
+
+ This considers the current state of changed files and failures:
+ - if there were any failures, return 123;
+ - if any files were changed and --check is being used, return 1;
+ - otherwise return 0.
+ """
# According to http://tldp.org/LDP/abs/html/exitcodes.html starting with
# 126 we have special return codes reserved by the shell.
if self.failure_count:
@@ -64,6 +78,10 @@ return 0
def __str__(self) -> str:
+ """Render a color report of the current state.
+
+ Use `click.unstyle` to remove colors.
+ """
if self.check or self.diff:
reformatted = "would be reformatted"
unchanged = "would be left unchanged"
@@ -86,4 +104,4 @@ if self.failure_count:
s = "s" if self.failure_count > 1 else ""
report.append(style(f"{self.failure_count} file{s} {failed}", fg="red"))
- return ", ".join(report) + "."+ return ", ".join(report) + "."
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/report.py |
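The `return_code` docstring added above spells out the exit-code policy: failures map to 123, pending changes under `--check` map to 1, and everything else maps to 0. A small usage sketch, assuming the `black` package from the row above is installed and importable as `black.report`:

from black.report import Report

report = Report(check=True)
report.change_count = 2           # files that would be reformatted
assert report.return_code == 1    # --check with pending changes

report.failure_count = 1
assert report.return_code == 123  # any failure takes precedence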
Generate consistent documentation across files |
import re
import sys
from collections.abc import Collection, Iterator
from dataclasses import replace
from enum import Enum, auto
from functools import partial, wraps
from typing import Union, cast
from black.brackets import (
COMMA_PRIORITY,
DOT_PRIORITY,
STRING_PRIORITY,
get_leaves_inside_matching_brackets,
max_delimiter_priority_in_atom,
)
from black.comments import (
FMT_OFF,
FMT_ON,
contains_fmt_directive,
generate_comments,
list_comments,
)
from black.lines import (
Line,
RHSResult,
append_leaves,
can_be_split,
can_omit_invisible_parens,
is_line_short_enough,
line_to_string,
)
from black.mode import Feature, Mode, Preview
from black.nodes import (
ASSIGNMENTS,
BRACKETS,
CLOSING_BRACKETS,
OPENING_BRACKETS,
STANDALONE_COMMENT,
STATEMENT,
WHITESPACE,
Visitor,
ensure_visible,
fstring_tstring_to_string,
get_annotation_type,
has_sibling_with_type,
is_arith_like,
is_async_stmt_or_funcdef,
is_atom_with_invisible_parens,
is_docstring,
is_empty_tuple,
is_generator,
is_lpar_token,
is_multiline_string,
is_name_token,
is_one_sequence_between,
is_one_tuple,
is_parent_function_or_class,
is_part_of_annotation,
is_rpar_token,
is_stub_body,
is_stub_suite,
is_tuple,
is_tuple_containing_star,
is_tuple_containing_walrus,
is_type_ignore_comment_string,
is_vararg,
is_walrus_assignment,
is_yield,
syms,
wrap_in_parentheses,
)
from black.numerics import normalize_numeric_literal
from black.strings import (
fix_multiline_docstring,
get_string_prefix,
normalize_string_prefix,
normalize_string_quotes,
normalize_unicode_escape_sequences,
)
from black.trans import (
CannotTransform,
StringMerger,
StringParenStripper,
StringParenWrapper,
StringSplitter,
Transformer,
hug_power_op,
)
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf, Node
# types
LeafID = int
LN = Union[Leaf, Node]
class CannotSplit(CannotTransform):
# This isn't a dataclass because @dataclass + Generic breaks mypyc.
# See also https://github.com/mypyc/mypyc/issues/827.
class LineGenerator(Visitor[Line]):
def __init__(self, mode: Mode, features: Collection[Feature]) -> None:
self.mode = mode
self.features = features
self.current_line: Line
self.__post_init__()
def line(self, indent: int = 0) -> Iterator[Line]:
if not self.current_line:
self.current_line.depth += indent
return # Line is empty, don't emit. Creating a new one unnecessary.
if len(self.current_line.leaves) == 1 and is_async_stmt_or_funcdef(
self.current_line.leaves[0]
):
# Special case for async def/for/with statements. `visit_async_stmt`
# adds an `ASYNC` leaf then visits the child def/for/with statement
# nodes. Line yields from those nodes shouldn't treat the former
# `ASYNC` leaf as a complete line.
return
complete_line = self.current_line
self.current_line = Line(mode=self.mode, depth=complete_line.depth + indent)
yield complete_line
def visit_default(self, node: LN) -> Iterator[Line]:
if isinstance(node, Leaf):
any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
for comment in generate_comments(node, mode=self.mode):
if any_open_brackets:
# any comment within brackets is subject to splitting
self.current_line.append(comment)
elif comment.type == token.COMMENT:
# regular trailing comment
self.current_line.append(comment)
yield from self.line()
else:
# regular standalone comment
yield from self.line()
self.current_line.append(comment)
yield from self.line()
if any_open_brackets:
node.prefix = ""
if node.type not in WHITESPACE:
self.current_line.append(node)
yield from super().visit_default(node)
def visit_test(self, node: Node) -> Iterator[Line]:
already_parenthesized = (
node.prev_sibling and node.prev_sibling.type == token.LPAR
)
if not already_parenthesized:
# Similar to logic in wrap_in_parentheses
lpar = Leaf(token.LPAR, "")
rpar = Leaf(token.RPAR, "")
prefix = node.prefix
node.prefix = ""
lpar.prefix = prefix
node.insert_child(0, lpar)
node.append_child(rpar)
yield from self.visit_default(node)
def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
# In blib2to3 INDENT never holds comments.
yield from self.line(+1)
yield from self.visit_default(node)
def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
# The current line might still wait for trailing comments. At DEDENT time
# there won't be any (they would be prefixes on the preceding NEWLINE).
# Emit the line then.
yield from self.line()
# While DEDENT has no value, its prefix may contain standalone comments
# that belong to the current indentation level. Get 'em.
yield from self.visit_default(node)
# Finally, emit the dedent.
yield from self.line(-1)
def visit_stmt(
self, node: Node, keywords: set[str], parens: set[str]
) -> Iterator[Line]:
normalize_invisible_parens(
node, parens_after=parens, mode=self.mode, features=self.features
)
for child in node.children:
if is_name_token(child) and child.value in keywords:
yield from self.line()
yield from self.visit(child)
def visit_typeparams(self, node: Node) -> Iterator[Line]:
yield from self.visit_default(node)
node.children[0].prefix = ""
def visit_typevartuple(self, node: Node) -> Iterator[Line]:
yield from self.visit_default(node)
node.children[1].prefix = ""
def visit_paramspec(self, node: Node) -> Iterator[Line]:
yield from self.visit_default(node)
node.children[1].prefix = ""
def visit_dictsetmaker(self, node: Node) -> Iterator[Line]:
if Preview.wrap_long_dict_values_in_parens in self.mode:
for i, child in enumerate(node.children):
if i == 0:
continue
if node.children[i - 1].type == token.COLON:
if (
child.type == syms.atom
and child.children[0].type in OPENING_BRACKETS
and not is_walrus_assignment(child)
):
maybe_make_parens_invisible_in_atom(
child,
parent=node,
mode=self.mode,
features=self.features,
remove_brackets_around_comma=False,
)
else:
wrap_in_parentheses(node, child, visible=False)
yield from self.visit_default(node)
def visit_funcdef(self, node: Node) -> Iterator[Line]:
yield from self.line()
# Remove redundant brackets around return type annotation.
is_return_annotation = False
for child in node.children:
if child.type == token.RARROW:
is_return_annotation = True
elif is_return_annotation:
if child.type == syms.atom and child.children[0].type == token.LPAR:
if maybe_make_parens_invisible_in_atom(
child,
parent=node,
mode=self.mode,
features=self.features,
remove_brackets_around_comma=False,
):
wrap_in_parentheses(node, child, visible=False)
else:
wrap_in_parentheses(node, child, visible=False)
is_return_annotation = False
for child in node.children:
yield from self.visit(child)
def visit_match_case(self, node: Node) -> Iterator[Line]:
normalize_invisible_parens(
node, parens_after=set(), mode=self.mode, features=self.features
)
yield from self.line()
for child in node.children:
yield from self.visit(child)
def visit_suite(self, node: Node) -> Iterator[Line]:
if is_stub_suite(node):
yield from self.visit(node.children[2])
else:
yield from self.visit_default(node)
def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
prev_type: int | None = None
for child in node.children:
if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child):
wrap_in_parentheses(node, child, visible=False)
prev_type = child.type
if node.parent and node.parent.type in STATEMENT:
if is_parent_function_or_class(node) and is_stub_body(node):
yield from self.visit_default(node)
else:
yield from self.line(+1)
yield from self.visit_default(node)
yield from self.line(-1)
else:
if node.parent and is_stub_suite(node.parent):
node.prefix = ""
yield from self.visit_default(node)
return
yield from self.line()
yield from self.visit_default(node)
def visit_async_stmt(self, node: Node) -> Iterator[Line]:
yield from self.line()
children = iter(node.children)
for child in children:
yield from self.visit(child)
if child.type == token.ASYNC or child.type == STANDALONE_COMMENT:
# STANDALONE_COMMENT happens when `# fmt: skip` is applied on the async
# line.
break
internal_stmt = next(children)
yield from self.visit(internal_stmt)
def visit_decorators(self, node: Node) -> Iterator[Line]:
for child in node.children:
yield from self.line()
yield from self.visit(child)
def visit_power(self, node: Node) -> Iterator[Line]:
for idx, leaf in enumerate(node.children[:-1]):
next_leaf = node.children[idx + 1]
if not isinstance(leaf, Leaf):
continue
value = leaf.value.lower()
if (
leaf.type == token.NUMBER
and next_leaf.type == syms.trailer
# Ensure that we are in an attribute trailer
and next_leaf.children[0].type == token.DOT
# It shouldn't wrap hexadecimal, binary and octal literals
and not value.startswith(("0x", "0b", "0o"))
# It shouldn't wrap complex literals
and "j" not in value
):
wrap_in_parentheses(node, leaf)
remove_await_parens(node, mode=self.mode, features=self.features)
yield from self.visit_default(node)
def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
yield from self.line()
def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
yield from self.visit_default(leaf)
yield from self.line()
def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]:
any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
if not any_open_brackets:
yield from self.line()
# STANDALONE_COMMENT nodes created by our special handling in
# normalize_fmt_off for comment-only blocks have fmt:off as the first
# line and fmt:on as the last line (each directive on its own line,
# not embedded in other text). These should be appended directly
# without calling visit_default, which would process their prefix and
# lose indentation. Normal STANDALONE_COMMENT nodes go through
# visit_default.
value = leaf.value
lines = value.splitlines()
is_fmt_off_block = (
len(lines) >= 2
and contains_fmt_directive(lines[0], FMT_OFF)
and contains_fmt_directive(lines[-1], FMT_ON)
)
if is_fmt_off_block:
# This is a fmt:off/on block from normalize_fmt_off - we still need
# to process any prefix comments (like markdown comments) but append
# the fmt block itself directly to preserve its formatting
# Only process prefix comments if there actually is a prefix with comments
if leaf.prefix and any(
line.strip().startswith("#")
and not contains_fmt_directive(line.strip())
for line in leaf.prefix.split("\n")
):
for comment in generate_comments(leaf, mode=self.mode):
yield from self.line()
self.current_line.append(comment)
yield from self.line()
# Clear the prefix since we've processed it as comments above
leaf.prefix = ""
self.current_line.append(leaf)
if not any_open_brackets:
yield from self.line()
else:
# Normal standalone comment - process through visit_default
yield from self.visit_default(leaf)
def visit_factor(self, node: Node) -> Iterator[Line]:
_operator, operand = node.children
if (
operand.type == syms.power
and len(operand.children) == 3
and operand.children[1].type == token.DOUBLESTAR
):
lpar = Leaf(token.LPAR, "(")
rpar = Leaf(token.RPAR, ")")
index = operand.remove() or 0
node.insert_child(index, Node(syms.atom, [lpar, operand, rpar]))
yield from self.visit_default(node)
def visit_tname(self, node: Node) -> Iterator[Line]:
if len(node.children) == 3 and maybe_make_parens_invisible_in_atom(
node.children[2], parent=node, mode=self.mode, features=self.features
):
wrap_in_parentheses(node, node.children[2], visible=False)
yield from self.visit_default(node)
def visit_STRING(self, leaf: Leaf) -> Iterator[Line]:
normalize_unicode_escape_sequences(leaf)
if is_docstring(leaf) and not re.search(r"\\\s*\n", leaf.value):
# We're ignoring docstrings with backslash newline escapes because changing
# indentation of those changes the AST representation of the code.
if self.mode.string_normalization:
docstring = normalize_string_prefix(leaf.value)
# We handle string normalization at the end of this method, but since
# what we do right now acts differently depending on quote style (ex.
# see padding logic below), there's a possibility for unstable
# formatting. To avoid a situation where this function formats a
# docstring differently on the second pass, normalize it early.
docstring = normalize_string_quotes(docstring)
else:
docstring = leaf.value
prefix = get_string_prefix(docstring)
docstring = docstring[len(prefix) :] # Remove the prefix
quote_char = docstring[0]
# A natural way to remove the outer quotes is to do:
# docstring = docstring.strip(quote_char)
# but that breaks on """""x""" (which is '""x').
# So we actually need to remove the first character and the next two
# characters but only if they are the same as the first.
quote_len = 1 if docstring[1] != quote_char else 3
docstring = docstring[quote_len:-quote_len]
docstring_started_empty = not docstring
indent = " " * 4 * self.current_line.depth
if is_multiline_string(leaf):
docstring = fix_multiline_docstring(docstring, indent)
else:
docstring = docstring.strip()
has_trailing_backslash = False
if docstring:
# Add some padding if the docstring starts / ends with a quote mark.
if docstring[0] == quote_char:
docstring = " " + docstring
if docstring[-1] == quote_char:
docstring += " "
if docstring[-1] == "\\":
backslash_count = len(docstring) - len(docstring.rstrip("\\"))
if backslash_count % 2:
# Odd number of trailing backslashes, add some padding to
# avoid escaping the closing string quote.
docstring += " "
has_trailing_backslash = True
elif not docstring_started_empty:
docstring = " "
# We could enforce triple quotes at this point.
quote = quote_char * quote_len
# It's invalid to put closing single-character quotes on a new line.
if quote_len == 3:
# We need to find the length of the last line of the docstring
# to find if we can add the closing quotes to the line without
# exceeding the maximum line length.
# If docstring is one line, we don't put the closing quotes on a
# separate line because it looks ugly (#3320).
lines = docstring.splitlines()
last_line_length = len(lines[-1]) if docstring else 0
# If adding closing quotes would cause the last line to exceed
# the maximum line length, and the closing quote is not
# prefixed by a newline then put a line break before
# the closing quotes
if (
len(lines) > 1
and last_line_length + quote_len > self.mode.line_length
and len(indent) + quote_len <= self.mode.line_length
and not has_trailing_backslash
):
if leaf.value[-1 - quote_len] == "\n":
leaf.value = prefix + quote + docstring + quote
else:
leaf.value = prefix + quote + docstring + "\n" + indent + quote
else:
leaf.value = prefix + quote + docstring + quote
else:
leaf.value = prefix + quote + docstring + quote
if self.mode.string_normalization and leaf.type == token.STRING:
leaf.value = normalize_string_prefix(leaf.value)
leaf.value = normalize_string_quotes(leaf.value)
yield from self.visit_default(leaf)
def visit_NUMBER(self, leaf: Leaf) -> Iterator[Line]:
normalize_numeric_literal(leaf)
yield from self.visit_default(leaf)
def visit_atom(self, node: Node) -> Iterator[Line]:
if len(node.children) == 3:
first = node.children[0]
last = node.children[-1]
if (first.type == token.LSQB and last.type == token.RSQB) or (
first.type == token.LBRACE and last.type == token.RBRACE
):
# Lists or sets of one item
maybe_make_parens_invisible_in_atom(
node.children[1],
parent=node,
mode=self.mode,
features=self.features,
)
yield from self.visit_default(node)
def visit_fstring(self, node: Node) -> Iterator[Line]:
# currently we don't want to format and split f-strings at all.
string_leaf = fstring_tstring_to_string(node)
node.replace(string_leaf)
if "\\" in string_leaf.value and any(
"\\" in str(child)
for child in node.children
if child.type == syms.fstring_replacement_field
):
# string normalization doesn't account for nested quotes,
# causing breakages. skip normalization when nested quotes exist
yield from self.visit_default(string_leaf)
return
yield from self.visit_STRING(string_leaf)
def visit_tstring(self, node: Node) -> Iterator[Line]:
# currently we don't want to format and split t-strings at all.
string_leaf = fstring_tstring_to_string(node)
node.replace(string_leaf)
if "\\" in string_leaf.value and any(
"\\" in str(child)
for child in node.children
if child.type == syms.fstring_replacement_field
):
# string normalization doesn't account for nested quotes,
# causing breakages. skip normalization when nested quotes exist
yield from self.visit_default(string_leaf)
return
yield from self.visit_STRING(string_leaf)
# TODO: Uncomment Implementation to format f-string children
# fstring_start = node.children[0]
# fstring_end = node.children[-1]
# assert isinstance(fstring_start, Leaf)
# assert isinstance(fstring_end, Leaf)
# quote_char = fstring_end.value[0]
# quote_idx = fstring_start.value.index(quote_char)
# prefix, quote = (
# fstring_start.value[:quote_idx],
# fstring_start.value[quote_idx:]
# )
# if not is_docstring(node, self.mode):
# prefix = normalize_string_prefix(prefix)
# assert quote == fstring_end.value
# is_raw_fstring = "r" in prefix or "R" in prefix
# middles = [
# leaf
# for leaf in node.leaves()
# if leaf.type == token.FSTRING_MIDDLE
# ]
# if self.mode.string_normalization:
# middles, quote = normalize_fstring_quotes(quote, middles, is_raw_fstring)
# fstring_start.value = prefix + quote
# fstring_end.value = quote
# yield from self.visit_default(node)
def visit_comp_for(self, node: Node) -> Iterator[Line]:
if Preview.wrap_comprehension_in in self.mode:
normalize_invisible_parens(
node, parens_after={"in"}, mode=self.mode, features=self.features
)
yield from self.visit_default(node)
def visit_old_comp_for(self, node: Node) -> Iterator[Line]:
yield from self.visit_comp_for(node)
def __post_init__(self) -> None:
self.current_line = Line(mode=self.mode)
v = self.visit_stmt
Ø: set[str] = set()
self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","})
self.visit_if_stmt = partial(
v, keywords={"if", "else", "elif"}, parens={"if", "elif"}
)
self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"})
self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"})
self.visit_try_stmt = partial(
v, keywords={"try", "except", "else", "finally"}, parens=Ø
)
self.visit_except_clause = partial(v, keywords={"except"}, parens={"except"})
self.visit_with_stmt = partial(v, keywords={"with"}, parens={"with"})
self.visit_classdef = partial(v, keywords={"class"}, parens=Ø)
self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS)
self.visit_return_stmt = partial(v, keywords={"return"}, parens={"return"})
self.visit_import_from = partial(v, keywords=Ø, parens={"import"})
self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"})
self.visit_async_funcdef = self.visit_async_stmt
self.visit_decorated = self.visit_decorators
# PEP 634
self.visit_match_stmt = self.visit_match_case
self.visit_case_block = self.visit_match_case
self.visit_guard = partial(v, keywords=Ø, parens={"if"})
# Remove when `simplify_power_operator_hugging` becomes stable.
def _hugging_power_ops_line_to_string(
line: Line,
features: Collection[Feature],
mode: Mode,
) -> str | None:
try:
return line_to_string(next(hug_power_op(line, features, mode)))
except CannotTransform:
return None
def transform_line(
line: Line, mode: Mode, features: Collection[Feature] = ()
) -> Iterator[Line]:
if line.is_comment:
yield line
return
line_str = line_to_string(line)
if Preview.simplify_power_operator_hugging in mode:
line_str_hugging_power_ops = line_str
else:
# We need the line string when power operators are hugging to determine if we
# should split the line. Default to line_str if no power operators are present
# on the line.
line_str_hugging_power_ops = (
_hugging_power_ops_line_to_string(line, features, mode) or line_str
)
ll = mode.line_length
sn = mode.string_normalization
string_merge = StringMerger(ll, sn)
string_paren_strip = StringParenStripper(ll, sn)
string_split = StringSplitter(ll, sn)
string_paren_wrap = StringParenWrapper(ll, sn)
transformers: list[Transformer]
if (
not line.contains_uncollapsable_type_comments()
and not line.should_split_rhs
and not line.magic_trailing_comma
and (
is_line_short_enough(line, mode=mode, line_str=line_str_hugging_power_ops)
or line.contains_unsplittable_type_ignore()
)
and not (line.inside_brackets and line.contains_standalone_comments())
and not line.contains_implicit_multiline_string_with_comments()
):
# Only apply basic string preprocessing, since lines shouldn't be split here.
if Preview.string_processing in mode:
transformers = [string_merge, string_paren_strip]
else:
transformers = []
elif line.is_def and not should_split_funcdef_with_rhs(line, mode):
transformers = [left_hand_split]
else:
def _rhs(
self: object, line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
for omit in generate_trailers_to_omit(line, mode.line_length):
lines = list(right_hand_split(line, mode, features, omit=omit))
# Note: this check is only able to figure out if the first line of the
# *current* transformation fits in the line length. This is true only
# for simple cases. All others require running more transforms via
# `transform_line()`. This check doesn't know if those would succeed.
if is_line_short_enough(lines[0], mode=mode):
yield from lines
return
# All splits failed, best effort split with no omits.
# This mostly happens to multiline strings that are by definition
# reported as not fitting a single line, as well as lines that contain
# trailing commas (those have to be exploded).
yield from right_hand_split(line, mode, features=features)
# HACK: nested functions (like _rhs) compiled by mypyc don't retain their
# __name__ attribute which is needed in `run_transformer` further down.
# Unfortunately a nested class breaks mypyc too. So a class must be created
# via type ... https://github.com/mypyc/mypyc/issues/884
rhs = type("rhs", (), {"__call__": _rhs})()
if Preview.string_processing in mode:
if line.inside_brackets:
transformers = [
string_merge,
string_paren_strip,
string_split,
delimiter_split,
standalone_comment_split,
string_paren_wrap,
rhs,
]
else:
transformers = [
string_merge,
string_paren_strip,
string_split,
string_paren_wrap,
rhs,
]
else:
if line.inside_brackets:
transformers = [delimiter_split, standalone_comment_split, rhs]
else:
transformers = [rhs]
if Preview.simplify_power_operator_hugging not in mode:
# It's always safe to attempt hugging of power operations and pretty much every
# line could match.
transformers.append(hug_power_op)
for transform in transformers:
# We are accumulating lines in `result` because we might want to abort
# mission and return the original line in the end, or attempt a different
# split altogether.
try:
result = run_transformer(line, transform, mode, features, line_str=line_str)
except CannotTransform:
continue
else:
yield from result
break
else:
yield line
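# --- Illustrative sketch (not part of this module) ------------------------------
# A stripped-down mimic of the fallback loop in transform_line above: each candidate
# transformer either produces output or raises CannotTransform, in which case the
# next one is tried; if every transformer refuses, the input is kept unchanged. The
# helper below is hypothetical and works on plain strings instead of Line objects.
def _first_applicable(text: str, transformers: Collection) -> str:
    for transform in transformers:
        try:
            return transform(text)
        except CannotTransform:
            continue
    return text  # nothing applied; fall back to the original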
def should_split_funcdef_with_rhs(line: Line, mode: Mode) -> bool:
return_type_leaves: list[Leaf] = []
in_return_type = False
for leaf in line.leaves:
if leaf.type == token.COLON:
in_return_type = False
if in_return_type:
return_type_leaves.append(leaf)
if leaf.type == token.RARROW:
in_return_type = True
# using `bracket_split_build_line` will mess with whitespace, so we duplicate a
# couple lines from it.
result = Line(mode=line.mode, depth=line.depth)
leaves_to_track = get_leaves_inside_matching_brackets(return_type_leaves)
for leaf in return_type_leaves:
result.append(
leaf,
preformatted=True,
track_bracket=id(leaf) in leaves_to_track,
)
# we could also return true if the line is too long, and the return type is longer
# than the param list. Or if `should_split_rhs` returns True.
return result.magic_trailing_comma is not None
class _BracketSplitComponent(Enum):
head = auto()
body = auto()
tail = auto()
def left_hand_split(
line: Line, _features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
for leaf_type in [token.LPAR, token.LSQB]:
tail_leaves: list[Leaf] = []
body_leaves: list[Leaf] = []
head_leaves: list[Leaf] = []
current_leaves = head_leaves
matching_bracket: Leaf | None = None
depth = 0
for index, leaf in enumerate(line.leaves):
if index == 2 and leaf.type == token.LSQB:
# A [ at index 2 means this is a type param, so start
# tracking the depth
depth += 1
elif depth > 0:
if leaf.type == token.LSQB:
depth += 1
elif leaf.type == token.RSQB:
depth -= 1
if (
current_leaves is body_leaves
and leaf.type in CLOSING_BRACKETS
and leaf.opening_bracket is matching_bracket
and isinstance(matching_bracket, Leaf)
# If the code is still on LPAR and we are inside a type
# param, ignore the match since this is searching
# for the function arguments
and not (leaf_type == token.LPAR and depth > 0)
):
ensure_visible(leaf)
ensure_visible(matching_bracket)
current_leaves = tail_leaves if body_leaves else head_leaves
current_leaves.append(leaf)
if current_leaves is head_leaves:
if leaf.type == leaf_type and (
not (leaf_type == token.LPAR and depth > 0)
):
matching_bracket = leaf
current_leaves = body_leaves
if matching_bracket and tail_leaves:
break
if not matching_bracket or not tail_leaves:
raise CannotSplit("No brackets found")
head = bracket_split_build_line(
head_leaves, line, matching_bracket, component=_BracketSplitComponent.head
)
body = bracket_split_build_line(
body_leaves, line, matching_bracket, component=_BracketSplitComponent.body
)
tail = bracket_split_build_line(
tail_leaves, line, matching_bracket, component=_BracketSplitComponent.tail
)
bracket_split_succeeded_or_raise(head, body, tail)
for result in (head, body, tail):
if result:
yield result
def right_hand_split(
line: Line,
mode: Mode,
features: Collection[Feature] = (),
omit: Collection[LeafID] = (),
) -> Iterator[Line]:
rhs_result = _first_right_hand_split(line, omit=omit)
yield from _maybe_split_omitting_optional_parens(
rhs_result, line, mode, features=features, omit=omit
)
def _first_right_hand_split(
line: Line,
omit: Collection[LeafID] = (),
) -> RHSResult:
tail_leaves: list[Leaf] = []
body_leaves: list[Leaf] = []
head_leaves: list[Leaf] = []
current_leaves = tail_leaves
opening_bracket: Leaf | None = None
closing_bracket: Leaf | None = None
for leaf in reversed(line.leaves):
if current_leaves is body_leaves:
if leaf is opening_bracket:
current_leaves = head_leaves if body_leaves else tail_leaves
current_leaves.append(leaf)
if current_leaves is tail_leaves:
if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit:
opening_bracket = leaf.opening_bracket
closing_bracket = leaf
current_leaves = body_leaves
if not (opening_bracket and closing_bracket and head_leaves):
# If there is no opening or closing_bracket that means the split failed and
# all content is in the tail. Otherwise, if `head_leaves` are empty, it means
# the matching `opening_bracket` wasn't available on `line` anymore.
raise CannotSplit("No brackets found")
tail_leaves.reverse()
body_leaves.reverse()
head_leaves.reverse()
body: Line | None = None
if (
Preview.hug_parens_with_braces_and_square_brackets in line.mode
and tail_leaves[0].value
and tail_leaves[0].opening_bracket is head_leaves[-1]
):
inner_body_leaves = list(body_leaves)
hugged_opening_leaves: list[Leaf] = []
hugged_closing_leaves: list[Leaf] = []
is_unpacking = body_leaves[0].type in [token.STAR, token.DOUBLESTAR]
unpacking_offset: int = 1 if is_unpacking else 0
while (
len(inner_body_leaves) >= 2 + unpacking_offset
and inner_body_leaves[-1].type in CLOSING_BRACKETS
and inner_body_leaves[-1].opening_bracket
is inner_body_leaves[unpacking_offset]
):
if unpacking_offset:
hugged_opening_leaves.append(inner_body_leaves.pop(0))
unpacking_offset = 0
hugged_opening_leaves.append(inner_body_leaves.pop(0))
hugged_closing_leaves.insert(0, inner_body_leaves.pop())
if hugged_opening_leaves and inner_body_leaves:
inner_body = bracket_split_build_line(
inner_body_leaves,
line,
hugged_opening_leaves[-1],
component=_BracketSplitComponent.body,
)
if (
line.mode.magic_trailing_comma
and inner_body_leaves[-1].type == token.COMMA
):
should_hug = True
else:
line_length = line.mode.line_length - sum(
len(str(leaf))
for leaf in hugged_opening_leaves + hugged_closing_leaves
)
if is_line_short_enough(
inner_body, mode=replace(line.mode, line_length=line_length)
):
# Do not hug if it fits on a single line.
should_hug = False
else:
should_hug = True
if should_hug:
body_leaves = inner_body_leaves
head_leaves.extend(hugged_opening_leaves)
tail_leaves = hugged_closing_leaves + tail_leaves
body = inner_body # No need to re-calculate the body again later.
head = bracket_split_build_line(
head_leaves, line, opening_bracket, component=_BracketSplitComponent.head
)
if body is None:
body = bracket_split_build_line(
body_leaves, line, opening_bracket, component=_BracketSplitComponent.body
)
tail = bracket_split_build_line(
tail_leaves, line, opening_bracket, component=_BracketSplitComponent.tail
)
bracket_split_succeeded_or_raise(head, body, tail)
return RHSResult(head, body, tail, opening_bracket, closing_bracket)
def _maybe_split_omitting_optional_parens(
rhs: RHSResult,
line: Line,
mode: Mode,
features: Collection[Feature] = (),
omit: Collection[LeafID] = (),
) -> Iterator[Line]:
if (
Feature.FORCE_OPTIONAL_PARENTHESES not in features
# the opening bracket is an optional paren
and rhs.opening_bracket.type == token.LPAR
and not rhs.opening_bracket.value
# the closing bracket is an optional paren
and rhs.closing_bracket.type == token.RPAR
and not rhs.closing_bracket.value
# it's not an import (optional parens are the only thing we can split on
# in this case; attempting a split without them is a waste of time)
and not line.is_import
# and we can actually remove the parens
and can_omit_invisible_parens(rhs, mode.line_length)
):
omit = {id(rhs.closing_bracket), *omit}
try:
# The RHSResult Omitting Optional Parens.
rhs_oop = _first_right_hand_split(line, omit=omit)
if _prefer_split_rhs_oop_over_rhs(rhs_oop, rhs, mode):
yield from _maybe_split_omitting_optional_parens(
rhs_oop, line, mode, features=features, omit=omit
)
return
except CannotSplit as e:
# For chained assignments we want to use the previous successful split
if line.is_chained_assignment:
pass
elif (
not can_be_split(rhs.body)
and not is_line_short_enough(rhs.body, mode=mode)
and not (
Preview.wrap_long_dict_values_in_parens
and rhs.opening_bracket.parent
and rhs.opening_bracket.parent.parent
and rhs.opening_bracket.parent.parent.type == syms.dictsetmaker
)
):
raise CannotSplit(
"Splitting failed, body is still too long and can't be split."
) from e
elif (
rhs.head.contains_multiline_strings()
or rhs.tail.contains_multiline_strings()
):
raise CannotSplit(
"The current optional pair of parentheses is bound to fail to"
" satisfy the splitting algorithm because the head or the tail"
" contains multiline strings which by definition never fit one"
" line."
) from e
ensure_visible(rhs.opening_bracket)
ensure_visible(rhs.closing_bracket)
for result in (rhs.head, rhs.body, rhs.tail):
if result:
yield result
def _prefer_split_rhs_oop_over_rhs(
rhs_oop: RHSResult, rhs: RHSResult, mode: Mode
) -> bool:
# contains unsplittable type ignore
if (
rhs_oop.head.contains_unsplittable_type_ignore()
or rhs_oop.body.contains_unsplittable_type_ignore()
or rhs_oop.tail.contains_unsplittable_type_ignore()
):
return True
# Retain optional parens around dictionary values
if (
Preview.wrap_long_dict_values_in_parens
and rhs.opening_bracket.parent
and rhs.opening_bracket.parent.parent
and rhs.opening_bracket.parent.parent.type == syms.dictsetmaker
and rhs.body.bracket_tracker.delimiters
):
# Unless the split is inside the key
return any(leaf.type == token.COLON for leaf in rhs_oop.tail.leaves)
# the split is right after `=`
if not (len(rhs.head.leaves) >= 2 and rhs.head.leaves[-2].type == token.EQUAL):
return True
# the left side of assignment contains brackets
if not any(leaf.type in BRACKETS for leaf in rhs.head.leaves[:-1]):
return True
# the left side of assignment is short enough (the -1 is for the ending optional
# paren)
if not is_line_short_enough(
rhs.head, mode=replace(mode, line_length=mode.line_length - 1)
):
return True
# the left side of assignment won't explode further because of magic trailing comma
if rhs.head.magic_trailing_comma is not None:
return True
# If we have multiple targets, we prefer more `=`s on the head vs pushing them to
# the body
rhs_head_equal_count = [leaf.type for leaf in rhs.head.leaves].count(token.EQUAL)
rhs_oop_head_equal_count = [leaf.type for leaf in rhs_oop.head.leaves].count(
token.EQUAL
)
if rhs_head_equal_count > 1 and rhs_head_equal_count > rhs_oop_head_equal_count:
return False
has_closing_bracket_after_assign = False
for leaf in reversed(rhs_oop.head.leaves):
if leaf.type == token.EQUAL:
break
if leaf.type in CLOSING_BRACKETS:
has_closing_bracket_after_assign = True
break
return (
# contains matching brackets after the `=` (done by checking there is a
# closing bracket)
has_closing_bracket_after_assign
or (
# the split is actually from inside the optional parens (done by checking
# the first line still contains the `=`)
any(leaf.type == token.EQUAL for leaf in rhs_oop.head.leaves)
# the first line is short enough
and is_line_short_enough(rhs_oop.head, mode=mode)
)
)
def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
tail_len = len(str(tail).strip())
if not body:
if tail_len == 0:
raise CannotSplit("Splitting brackets produced the same line")
elif tail_len < 3:
raise CannotSplit(
f"Splitting brackets on an empty body to save {tail_len} characters is"
" not worth it"
)
def _ensure_trailing_comma(
leaves: list[Leaf], original: Line, opening_bracket: Leaf
) -> bool:
if not leaves:
return False
# Ensure a trailing comma for imports
if original.is_import:
return True
# ...and standalone function arguments
if not original.is_def:
return False
if opening_bracket.value != "(":
return False
# Don't add commas if we already have any commas
if any(
leaf.type == token.COMMA and not is_part_of_annotation(leaf) for leaf in leaves
):
return False
# Find a leaf with a parent (comments don't have parents)
leaf_with_parent = next((leaf for leaf in leaves if leaf.parent), None)
if leaf_with_parent is None:
return True
# Don't add commas inside parenthesized return annotations
if get_annotation_type(leaf_with_parent) == "return":
return False
# Don't add commas inside PEP 604 unions
if (
leaf_with_parent.parent
and leaf_with_parent.parent.next_sibling
and leaf_with_parent.parent.next_sibling.type == token.VBAR
):
return False
return True
def bracket_split_build_line(
leaves: list[Leaf],
original: Line,
opening_bracket: Leaf,
*,
component: _BracketSplitComponent,
) -> Line:
result = Line(mode=original.mode, depth=original.depth)
if component is _BracketSplitComponent.body:
result.inside_brackets = True
result.depth += 1
if _ensure_trailing_comma(leaves, original, opening_bracket):
for i in range(len(leaves) - 1, -1, -1):
if leaves[i].type == STANDALONE_COMMENT:
continue
if leaves[i].type != token.COMMA:
new_comma = Leaf(token.COMMA, ",")
leaves.insert(i + 1, new_comma)
break
leaves_to_track: set[LeafID] = set()
if component is _BracketSplitComponent.head:
leaves_to_track = get_leaves_inside_matching_brackets(leaves)
# Populate the line
for leaf in leaves:
result.append(
leaf,
preformatted=True,
track_bracket=id(leaf) in leaves_to_track,
)
for comment_after in original.comments_after(leaf):
result.append(comment_after, preformatted=True)
if component is _BracketSplitComponent.body and should_split_line(
result, opening_bracket
):
result.should_split_rhs = True
return result
def dont_increase_indentation(split_func: Transformer) -> Transformer:
@wraps(split_func)
def split_wrapper(
line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
for split_line in split_func(line, features, mode):
split_line.leaves[0].prefix = ""
yield split_line
return split_wrapper
def _get_last_non_comment_leaf(line: Line) -> int | None:
for leaf_idx in range(len(line.leaves) - 1, 0, -1):
if line.leaves[leaf_idx].type != STANDALONE_COMMENT:
return leaf_idx
return None
def _can_add_trailing_comma(leaf: Leaf, features: Collection[Feature]) -> bool:
if is_vararg(leaf, within={syms.typedargslist}):
return Feature.TRAILING_COMMA_IN_DEF in features
if is_vararg(leaf, within={syms.arglist, syms.argument}):
return Feature.TRAILING_COMMA_IN_CALL in features
return True
def _safe_add_trailing_comma(safe: bool, delimiter_priority: int, line: Line) -> Line:
if (
safe
and delimiter_priority == COMMA_PRIORITY
and line.leaves[-1].type != token.COMMA
and line.leaves[-1].type != STANDALONE_COMMENT
):
new_comma = Leaf(token.COMMA, ",")
line.append(new_comma)
return line
MIGRATE_COMMENT_DELIMITERS = {STRING_PRIORITY, COMMA_PRIORITY}
@dont_increase_indentation
def delimiter_split(
line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
if len(line.leaves) == 0:
raise CannotSplit("Line empty") from None
last_leaf = line.leaves[-1]
bt = line.bracket_tracker
try:
delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)})
except ValueError:
raise CannotSplit("No delimiters found") from None
if (
delimiter_priority == DOT_PRIORITY
and bt.delimiter_count_with_priority(delimiter_priority) == 1
):
raise CannotSplit("Splitting a single attribute from its owner looks wrong")
current_line = Line(
mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
lowest_depth = sys.maxsize
trailing_comma_safe = True
def append_to_line(leaf: Leaf) -> Iterator[Line]:
nonlocal current_line
try:
current_line.append_safe(leaf, preformatted=True)
except ValueError:
yield current_line
current_line = Line(
mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
current_line.append(leaf)
def append_comments(leaf: Leaf) -> Iterator[Line]:
for comment_after in line.comments_after(leaf):
yield from append_to_line(comment_after)
last_non_comment_leaf = _get_last_non_comment_leaf(line)
for leaf_idx, leaf in enumerate(line.leaves):
yield from append_to_line(leaf)
previous_priority = leaf_idx > 0 and bt.delimiters.get(
id(line.leaves[leaf_idx - 1])
)
if (
previous_priority != delimiter_priority
or delimiter_priority in MIGRATE_COMMENT_DELIMITERS
):
yield from append_comments(leaf)
lowest_depth = min(lowest_depth, leaf.bracket_depth)
if trailing_comma_safe and leaf.bracket_depth == lowest_depth:
trailing_comma_safe = _can_add_trailing_comma(leaf, features)
if last_leaf.type == STANDALONE_COMMENT and leaf_idx == last_non_comment_leaf:
current_line = _safe_add_trailing_comma(
trailing_comma_safe, delimiter_priority, current_line
)
leaf_priority = bt.delimiters.get(id(leaf))
if leaf_priority == delimiter_priority:
if (
leaf_idx + 1 < len(line.leaves)
and delimiter_priority not in MIGRATE_COMMENT_DELIMITERS
):
yield from append_comments(line.leaves[leaf_idx + 1])
yield current_line
current_line = Line(
mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
if current_line:
current_line = _safe_add_trailing_comma(
trailing_comma_safe, delimiter_priority, current_line
)
yield current_line
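# Illustrative sketch, not part of the original module: the observable effect
# of a delimiter split through Black's public API (assumes `black` is
# installed; the function and argument names below are made up).
#
#   import black
#   src = "result = frobnicate(alpha, beta, gamma, delta, epsilon, zeta)\n"
#   print(black.format_str(src, mode=black.Mode(line_length=30)))
#
# prints roughly:
#
#   result = frobnicate(
#       alpha,
#       beta,
#       gamma,
#       delta,
#       epsilon,
#       zeta,
#   )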
@dont_increase_indentation
def standalone_comment_split(
line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
if not line.contains_standalone_comments():
raise CannotSplit("Line does not have any standalone comments")
current_line = Line(
mode=line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
def append_to_line(leaf: Leaf) -> Iterator[Line]:
nonlocal current_line
try:
current_line.append_safe(leaf, preformatted=True)
except ValueError:
yield current_line
current_line = Line(
line.mode, depth=line.depth, inside_brackets=line.inside_brackets
)
current_line.append(leaf)
for leaf in line.leaves:
yield from append_to_line(leaf)
for comment_after in line.comments_after(leaf):
yield from append_to_line(comment_after)
if current_line:
yield current_line
def normalize_invisible_parens(
node: Node, parens_after: set[str], *, mode: Mode, features: Collection[Feature]
) -> None:
for pc in list_comments(node.prefix, is_endmarker=False, mode=mode):
if contains_fmt_directive(pc.value, FMT_OFF):
# This `node` has a prefix with `# fmt: off`, don't mess with parens.
return
# The multiple context managers grammar has a different pattern, thus this is
# separate from the for-loop below. This possibly wraps them in invisible parens,
# which are later removed in remove_with_parens when needed.
if node.type == syms.with_stmt:
_maybe_wrap_cms_in_parens(node, mode, features)
check_lpar = False
for index, child in enumerate(list(node.children)):
# Fixes a bug where invisible parens are not properly stripped from
# assignment statements that contain type annotations.
if isinstance(child, Node) and child.type == syms.annassign:
normalize_invisible_parens(
child, parens_after=parens_after, mode=mode, features=features
)
# Fixes a bug where invisible parens are not properly wrapped around
# case blocks.
if isinstance(child, Node) and child.type == syms.case_block:
normalize_invisible_parens(
child, parens_after={"case"}, mode=mode, features=features
)
# Add parentheses around if guards in case blocks
if isinstance(child, Node) and child.type == syms.guard:
normalize_invisible_parens(
child, parens_after={"if"}, mode=mode, features=features
)
# Add parentheses around long tuple unpacking in assignments.
if (
index == 0
and isinstance(child, Node)
and child.type == syms.testlist_star_expr
):
check_lpar = True
if (
index == 0
and isinstance(child, Node)
and child.type == syms.atom
and node.type == syms.expr_stmt
and not _atom_has_magic_trailing_comma(child, mode)
and not _is_atom_multiline(child)
):
if maybe_make_parens_invisible_in_atom(
child,
parent=node,
mode=mode,
features=features,
remove_brackets_around_comma=True,
allow_star_expr=True,
):
wrap_in_parentheses(node, child, visible=False)
if check_lpar:
if (
child.type == syms.atom
and node.type == syms.for_stmt
and isinstance(child.prev_sibling, Leaf)
and child.prev_sibling.type == token.NAME
and child.prev_sibling.value == "for"
):
if maybe_make_parens_invisible_in_atom(
child,
parent=node,
mode=mode,
features=features,
remove_brackets_around_comma=True,
):
wrap_in_parentheses(node, child, visible=False)
elif isinstance(child, Node) and node.type == syms.with_stmt:
remove_with_parens(child, node, mode=mode, features=features)
elif child.type == syms.atom and not (
"in" in parens_after
and len(child.children) == 3
and is_lpar_token(child.children[0])
and is_rpar_token(child.children[-1])
and child.children[1].type == syms.test
):
if maybe_make_parens_invisible_in_atom(
child, parent=node, mode=mode, features=features
):
wrap_in_parentheses(node, child, visible=False)
elif is_one_tuple(child):
wrap_in_parentheses(node, child, visible=True)
elif node.type == syms.import_from:
_normalize_import_from(node, child, index)
break
elif (
index == 1
and child.type == token.STAR
and node.type == syms.except_clause
):
# In except* (PEP 654), the star is actually part of
# the keyword, so we need to skip the insertion of
# invisible parentheses to work more precisely.
continue
elif (
isinstance(child, Leaf)
and child.next_sibling is not None
and child.next_sibling.type == token.COLON
and child.value == "case"
):
# A special patch for the "case case:" scenario: the second occurrence
# of case will not be parsed as a Python keyword.
break
elif not is_multiline_string(child):
if (
Preview.fix_if_guard_explosion_in_case_statement in mode
and node.type == syms.guard
):
mock_line = Line(mode=mode)
for leaf in child.leaves():
mock_line.append(leaf)
# If it's a guard AND it's short, we DON'T wrap
if not is_line_short_enough(mock_line, mode=mode):
wrap_in_parentheses(node, child, visible=False)
else:
wrap_in_parentheses(node, child, visible=False)
comma_check = child.type == token.COMMA
check_lpar = isinstance(child, Leaf) and (
child.value in parens_after or comma_check
)
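# Illustrative sketch, not part of the original module: redundant optional
# parentheses are made invisible and disappear from the output (shown via the
# public API; assumes `black` is installed).
#
#   import black
#   black.format_str("x = (1 + 2)\n", mode=black.Mode())   # -> "x = 1 + 2\n"
#   black.format_str("if (a and b):\n    pass\n", mode=black.Mode())
#   # -> "if a and b:\n    pass\n"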
def _normalize_import_from(parent: Node, child: LN, index: int) -> None:
# "import from" nodes store parentheses directly as part of
# the statement
if is_lpar_token(child):
assert is_rpar_token(parent.children[-1])
# make parentheses invisible
child.value = ""
parent.children[-1].value = ""
elif child.type != token.STAR:
# insert invisible parentheses
parent.insert_child(index, Leaf(token.LPAR, ""))
parent.append_child(Leaf(token.RPAR, ""))
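# Illustrative sketch, not part of the original module: the invisible parens
# inserted here become real ones when a from-import is longer than the target
# line length, and the names are then exploded one per line with a trailing
# comma (module and names below are made up), roughly:
#
#   from some.very.deep.package.path import alpha, beta, gamma, delta
#
#   ->
#
#   from some.very.deep.package.path import (
#       alpha,
#       beta,
#       gamma,
#       delta,
#   )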
def remove_await_parens(node: Node, mode: Mode, features: Collection[Feature]) -> None:
if node.children[0].type == token.AWAIT and len(node.children) > 1:
if (
node.children[1].type == syms.atom
and node.children[1].children[0].type == token.LPAR
):
if maybe_make_parens_invisible_in_atom(
node.children[1],
parent=node,
mode=mode,
features=features,
remove_brackets_around_comma=True,
):
wrap_in_parentheses(node, node.children[1], visible=False)
# Since await is an expression we shouldn't remove
# brackets in cases where this would change
# the AST due to operator precedence.
# Therefore we only aim to remove brackets around
# power nodes that aren't also await expressions themselves.
# https://peps.python.org/pep-0492/#updated-operator-precedence-table
# N.B. We've still removed any redundant nested brackets though :)
opening_bracket = cast(Leaf, node.children[1].children[0])
closing_bracket = cast(Leaf, node.children[1].children[-1])
bracket_contents = node.children[1].children[1]
if isinstance(bracket_contents, Node) and (
bracket_contents.type != syms.power
or bracket_contents.children[0].type == token.AWAIT
or any(
isinstance(child, Leaf) and child.type == token.DOUBLESTAR
for child in bracket_contents.children
)
):
ensure_visible(opening_bracket)
ensure_visible(closing_bracket)
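# Illustrative sketch, not part of the original module (assumes `black` is
# installed; `fetch`, `a`, and `b` are made-up names):
#
#   await (fetch())   ->   await fetch()     # parens removed
#   await (a ** b)    ->   await (a**b)      # parens kept: precedence matters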
def _maybe_wrap_cms_in_parens(
node: Node, mode: Mode, features: Collection[Feature]
) -> None:
if (
Feature.PARENTHESIZED_CONTEXT_MANAGERS not in features
or len(node.children) <= 2
# If it's an atom, it's already wrapped in parens.
or node.children[1].type == syms.atom
):
return
colon_index: int | None = None
for i in range(2, len(node.children)):
if node.children[i].type == token.COLON:
colon_index = i
break
if colon_index is not None:
lpar = Leaf(token.LPAR, "")
rpar = Leaf(token.RPAR, "")
context_managers = node.children[1:colon_index]
for child in context_managers:
child.remove()
# After wrapping, the with_stmt will look like this:
# with_stmt
# NAME 'with'
# atom
# LPAR ''
# testlist_gexp
# ... <-- context_managers
# /testlist_gexp
# RPAR ''
# /atom
# COLON ':'
new_child = Node(
syms.atom, [lpar, Node(syms.testlist_gexp, context_managers), rpar]
)
node.insert_child(1, new_child)
def remove_with_parens(
node: Node, parent: Node, mode: Mode, features: Collection[Feature]
) -> None:
# Removing all unnecessary parentheses in with statements in one pass is a tad
# complex as different variations of bracketed statements result in pretty
# different parse trees:
#
# with (open("file")) as f: # this is an asexpr_test
# ...
#
# with (open("file") as f): # this is an atom containing an
# ... # asexpr_test
#
# with (open("file")) as f, (open("file")) as f: # this is asexpr_test, COMMA,
# ... # asexpr_test
#
# with (open("file") as f, open("file") as f): # an atom containing a
# ... # testlist_gexp which then
# # contains multiple asexpr_test(s)
if node.type == syms.atom:
if maybe_make_parens_invisible_in_atom(
node,
parent=parent,
mode=mode,
features=features,
remove_brackets_around_comma=True,
):
wrap_in_parentheses(parent, node, visible=False)
if isinstance(node.children[1], Node):
remove_with_parens(node.children[1], node, mode=mode, features=features)
elif node.type == syms.testlist_gexp:
for child in node.children:
if isinstance(child, Node):
remove_with_parens(child, node, mode=mode, features=features)
elif node.type == syms.asexpr_test and not any(
leaf.type == token.COLONEQUAL for leaf in node.leaves()
):
if maybe_make_parens_invisible_in_atom(
node.children[0],
parent=node,
mode=mode,
features=features,
remove_brackets_around_comma=True,
):
wrap_in_parentheses(node, node.children[0], visible=False)
def _atom_has_magic_trailing_comma(node: LN, mode: Mode) -> bool:
if not mode.magic_trailing_comma:
return False
return is_one_tuple(node)
def _is_atom_multiline(node: LN) -> bool:
if not isinstance(node, Node) or len(node.children) < 3:
return False
# Check the middle child (between LPAR and RPAR) for newlines in its subtree
# The first child's prefix contains blank lines/comments before the opening paren
middle = node.children[1]
for child in middle.pre_order():
if isinstance(child, Leaf) and "\n" in child.prefix:
return True
return False
def maybe_make_parens_invisible_in_atom(
node: LN,
parent: LN,
mode: Mode,
features: Collection[Feature],
remove_brackets_around_comma: bool = False,
allow_star_expr: bool = False,
) -> bool:
if (
node.type not in (syms.atom, syms.expr)
or is_empty_tuple(node)
or is_one_tuple(node)
or (is_tuple(node) and parent.type == syms.asexpr_test)
or (
is_tuple(node)
and parent.type == syms.with_stmt
and has_sibling_with_type(node, token.COMMA)
)
or (is_yield(node) and parent.type != syms.expr_stmt)
or (
# This condition tries to prevent removing non-optional brackets
# around a tuple; however, it can be a bit overzealous, so we provide
# an option to skip this check for `for` and `with` statements.
not remove_brackets_around_comma
and max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY
# Remove parentheses around multiple exception types in except and
# except* without as. See PEP 758 for details.
and not (
Feature.UNPARENTHESIZED_EXCEPT_TYPES in features
# is a tuple
and is_tuple(node)
# has a parent node
and node.parent is not None
# parent is an except clause
and node.parent.type == syms.except_clause
# is not immediately followed by as clause
and not (
node.next_sibling is not None
and is_name_token(node.next_sibling)
and node.next_sibling.value == "as"
)
)
)
or is_tuple_containing_walrus(node)
or (not allow_star_expr and is_tuple_containing_star(node))
or is_generator(node)
):
return False
if is_walrus_assignment(node):
if parent.type in [
syms.annassign,
syms.expr_stmt,
syms.assert_stmt,
syms.return_stmt,
syms.except_clause,
syms.funcdef,
syms.with_stmt,
syms.testlist_gexp,
syms.tname,
# these ones aren't useful to end users, but they do please fuzzers
syms.for_stmt,
syms.del_stmt,
syms.for_stmt,
]:
return False
first = node.children[0]
last = node.children[-1]
if is_lpar_token(first) and is_rpar_token(last):
middle = node.children[1]
# make parentheses invisible
if (
# If the prefix of `middle` includes a type comment with
# ignore annotation, then we do not remove the parentheses
not is_type_ignore_comment_string(middle.prefix.strip(), mode=mode)
):
first.value = ""
last.value = ""
maybe_make_parens_invisible_in_atom(
middle,
parent=parent,
mode=mode,
features=features,
remove_brackets_around_comma=remove_brackets_around_comma,
)
if is_atom_with_invisible_parens(middle):
# Strip the invisible parens from `middle` by replacing
# it with the child in-between the invisible parens
middle.replace(middle.children[1])
if middle.children[0].prefix.strip():
# Preserve comments before first paren
middle.children[1].prefix = (
middle.children[0].prefix + middle.children[1].prefix
)
if middle.children[-1].prefix.strip():
# Preserve comments before last paren
last.prefix = middle.children[-1].prefix + last.prefix
return False
return True
def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
if not (opening_bracket.parent and opening_bracket.value in "[{("):
return False
# We're essentially checking if the body is delimited by commas and there's more
# than one of them (we're excluding the trailing comma and if the delimiter priority
# is still commas, that means there's more).
exclude = set()
trailing_comma = False
try:
last_leaf = line.leaves[-1]
if last_leaf.type == token.COMMA:
trailing_comma = True
exclude.add(id(last_leaf))
max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude)
except (IndexError, ValueError):
return False
return max_priority == COMMA_PRIORITY and (
(line.mode.magic_trailing_comma and trailing_comma)
# always explode imports
or opening_bracket.parent.type in {syms.atom, syms.import_from}
)
def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[set[LeafID]]:
omit: set[LeafID] = set()
if not line.magic_trailing_comma:
yield omit
length = 4 * line.depth
opening_bracket: Leaf | None = None
closing_bracket: Leaf | None = None
inner_brackets: set[LeafID] = set()
for index, leaf, leaf_length in line.enumerate_with_length(is_reversed=True):
length += leaf_length
if length > line_length:
break
has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix)
if leaf.type == STANDALONE_COMMENT or has_inline_comment:
break
if opening_bracket:
if leaf is opening_bracket:
opening_bracket = None
elif leaf.type in CLOSING_BRACKETS:
prev = line.leaves[index - 1] if index > 0 else None
if (
prev
and prev.type == token.COMMA
and leaf.opening_bracket is not None
and not is_one_sequence_between(
leaf.opening_bracket, leaf, line.leaves
)
):
# Never omit bracket pairs with trailing commas.
# We need to explode on those.
break
inner_brackets.add(id(leaf))
elif leaf.type in CLOSING_BRACKETS:
prev = line.leaves[index - 1] if index > 0 else None
if prev and prev.type in OPENING_BRACKETS:
# Empty brackets would fail a split so treat them as "inner"
# brackets (i.e. only add them to the `omit` set if another
# pair of brackets was good enough).
inner_brackets.add(id(leaf))
continue
if closing_bracket:
omit.add(id(closing_bracket))
omit.update(inner_brackets)
inner_brackets.clear()
yield omit
if (
prev
and prev.type == token.COMMA
and leaf.opening_bracket is not None
and not is_one_sequence_between(leaf.opening_bracket, leaf, line.leaves)
):
# Never omit bracket pairs with trailing commas.
# We need to explode on those.
break
if leaf.value:
opening_bracket = leaf.opening_bracket
closing_bracket = leaf
def run_transformer(
line: Line,
transform: Transformer,
mode: Mode,
features: Collection[Feature],
*,
line_str: str = "",
) -> list[Line]:
if not line_str:
line_str = line_to_string(line)
result: list[Line] = []
for transformed_line in transform(line, features, mode):
if str(transformed_line).strip("\n") == line_str:
raise CannotTransform("Line transformer returned an unchanged result")
result.extend(transform_line(transformed_line, mode=mode, features=features))
features_set = set(features)
if (
Feature.FORCE_OPTIONAL_PARENTHESES in features_set
or transform.__class__.__name__ != "rhs"
or not line.bracket_tracker.invisible
or any(bracket.value for bracket in line.bracket_tracker.invisible)
or line.contains_multiline_strings()
or result[0].contains_uncollapsable_type_comments()
or result[0].contains_unsplittable_type_ignore()
or is_line_short_enough(result[0], mode=mode)
# If any leaves have no parents (which _can_ occur since
# `transform(line)` potentially destroys the line's underlying node
# structure), then we can't proceed. Doing so would cause the below
# call to `append_leaves()` to fail.
or any(leaf.parent is None for leaf in line.leaves)
):
return result
line_copy = line.clone()
append_leaves(line_copy, line, line.leaves)
features_fop = features_set | {Feature.FORCE_OPTIONAL_PARENTHESES}
second_opinion = run_transformer(
line_copy, transform, mode, features_fop, line_str=line_str
)
if all(is_line_short_enough(ln, mode=mode) for ln in second_opinion):
result = second_opinion
return result | --- +++ @@ -1,3 +1,6 @@+"""
+Generating lines of code.
+"""
import re
import sys
@@ -96,11 +99,17 @@
class CannotSplit(CannotTransform):
+ """A readable split that fits the allotted line length is impossible."""
# This isn't a dataclass because @dataclass + Generic breaks mypyc.
# See also https://github.com/mypyc/mypyc/issues/827.
class LineGenerator(Visitor[Line]):
+ """Generates reformatted Line objects. Empty lines are not emitted.
+
+ Note: destroys the tree it's visiting by mutating prefixes of its leaves
+ in ways that will no longer stringify to valid Python code on the tree.
+ """
def __init__(self, mode: Mode, features: Collection[Feature]) -> None:
self.mode = mode
@@ -109,6 +118,13 @@ self.__post_init__()
def line(self, indent: int = 0) -> Iterator[Line]:
+ """Generate a line.
+
+ If the line is empty, only emit if it makes sense.
+ If the line is too long, split it first and then generate.
+
+ If any lines were generated, set up a new current_line.
+ """
if not self.current_line:
self.current_line.depth += indent
return # Line is empty, don't emit. Creating a new one unnecessary.
@@ -127,6 +143,7 @@ yield complete_line
def visit_default(self, node: LN) -> Iterator[Line]:
+ """Default `visit_*()` implementation. Recurses to children of `node`."""
if isinstance(node, Leaf):
any_open_brackets = self.current_line.bracket_tracker.any_open_brackets()
for comment in generate_comments(node, mode=self.mode):
@@ -152,6 +169,7 @@ yield from super().visit_default(node)
def visit_test(self, node: Node) -> Iterator[Line]:
+ """Visit an `x if y else z` test"""
already_parenthesized = (
node.prev_sibling and node.prev_sibling.type == token.LPAR
@@ -170,11 +188,13 @@ yield from self.visit_default(node)
def visit_INDENT(self, node: Leaf) -> Iterator[Line]:
+ """Increase indentation level, maybe yield a line."""
# In blib2to3 INDENT never holds comments.
yield from self.line(+1)
yield from self.visit_default(node)
def visit_DEDENT(self, node: Leaf) -> Iterator[Line]:
+ """Decrease indentation level, maybe yield a line."""
# The current line might still wait for trailing comments. At DEDENT time
# there won't be any (they would be prefixes on the preceding NEWLINE).
# Emit the line then.
@@ -190,6 +210,17 @@ def visit_stmt(
self, node: Node, keywords: set[str], parens: set[str]
) -> Iterator[Line]:
+ """Visit a statement.
+
+ This implementation is shared for `if`, `while`, `for`, `try`, `except`,
+ `def`, `with`, `class`, `assert`, and assignments.
+
+ The relevant Python language `keywords` for a given statement will be
+ NAME leaves within it. This method puts those on a separate line.
+
+ `parens` holds a set of string leaf values immediately after which
+ invisible parens should be put.
+ """
normalize_invisible_parens(
node, parens_after=parens, mode=self.mode, features=self.features
)
@@ -234,6 +265,7 @@ yield from self.visit_default(node)
def visit_funcdef(self, node: Node) -> Iterator[Line]:
+ """Visit function definition."""
yield from self.line()
# Remove redundant brackets around return type annotation.
@@ -259,6 +291,7 @@ yield from self.visit(child)
def visit_match_case(self, node: Node) -> Iterator[Line]:
+ """Visit either a match or case statement."""
normalize_invisible_parens(
node, parens_after=set(), mode=self.mode, features=self.features
)
@@ -268,12 +301,14 @@ yield from self.visit(child)
def visit_suite(self, node: Node) -> Iterator[Line]:
+ """Visit a suite."""
if is_stub_suite(node):
yield from self.visit(node.children[2])
else:
yield from self.visit_default(node)
def visit_simple_stmt(self, node: Node) -> Iterator[Line]:
+ """Visit a statement without nested statements."""
prev_type: int | None = None
for child in node.children:
if (prev_type is None or prev_type == token.SEMI) and is_arith_like(child):
@@ -297,6 +332,7 @@ yield from self.visit_default(node)
def visit_async_stmt(self, node: Node) -> Iterator[Line]:
+ """Visit `async def`, `async for`, `async with`."""
yield from self.line()
children = iter(node.children)
@@ -312,6 +348,7 @@ yield from self.visit(internal_stmt)
def visit_decorators(self, node: Node) -> Iterator[Line]:
+ """Visit decorators."""
for child in node.children:
yield from self.line()
yield from self.visit(child)
@@ -341,9 +378,11 @@ yield from self.visit_default(node)
def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]:
+ """Remove a semicolon and put the other statement on a separate line."""
yield from self.line()
def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]:
+ """End of file. Process outstanding comments and end with a newline."""
yield from self.visit_default(leaf)
yield from self.line()
@@ -391,6 +430,10 @@ yield from self.visit_default(leaf)
def visit_factor(self, node: Node) -> Iterator[Line]:
+ """Force parentheses between a unary op and a binary power:
+
+ -2 ** 8 -> -(2 ** 8)
+ """
_operator, operand = node.children
if (
operand.type == syms.power
@@ -404,6 +447,16 @@ yield from self.visit_default(node)
def visit_tname(self, node: Node) -> Iterator[Line]:
+ """
+ Add potential parentheses around types in function parameter lists to be made
+ into real parentheses in case the type hint is too long to fit on a line
+ Examples:
+ def foo(a: int, b: float = 7): ...
+
+ ->
+
+ def foo(a: (int), b: (float) = 7): ...
+ """
if len(node.children) == 3 and maybe_make_parens_invisible_in_atom(
node.children[2], parent=node, mode=self.mode, features=self.features
):
@@ -504,6 +557,7 @@ yield from self.visit_default(leaf)
def visit_atom(self, node: Node) -> Iterator[Line]:
+ """Visit any atom"""
if len(node.children) == 3:
first = node.children[0]
last = node.children[-1]
@@ -594,6 +648,7 @@ yield from self.visit_comp_for(node)
def __post_init__(self) -> None:
+ """You are in a twisty little maze of passages."""
self.current_line = Line(mode=self.mode)
v = self.visit_stmt
@@ -639,6 +694,12 @@ def transform_line(
line: Line, mode: Mode, features: Collection[Feature] = ()
) -> Iterator[Line]:
+ """Transform a `line`, potentially splitting it into many lines.
+
+ They should fit in the allotted `line_length` but might not be able to.
+
+ `features` are syntactical features that may be used in the output.
+ """
if line.is_comment:
yield line
return
@@ -686,6 +747,12 @@ def _rhs(
self: object, line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
+ """Wraps calls to `right_hand_split`.
+
+ The calls increasingly `omit` right-hand trailers (bracket pairs with
+ content), meaning the trailers get glued together to split on another
+ bracket pair instead.
+ """
for omit in generate_trailers_to_omit(line, mode.line_length):
lines = list(right_hand_split(line, mode, features, omit=omit))
# Note: this check is only able to figure out if the first line of the
@@ -755,6 +822,9 @@
def should_split_funcdef_with_rhs(line: Line, mode: Mode) -> bool:
+ """If a funcdef has a magic trailing comma in the return type, then we should first
+ split the line with rhs to respect the comma.
+ """
return_type_leaves: list[Leaf] = []
in_return_type = False
@@ -791,6 +861,12 @@ def left_hand_split(
line: Line, _features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
+ """Split line into many lines, starting with the first matching bracket pair.
+
+ Note: this usually looks weird, only use this for function definitions.
+ Prefer RHS otherwise. This is why this function is not symmetrical with
+ :func:`right_hand_split` which also handles optional parentheses.
+ """
for leaf_type in [token.LPAR, token.LSQB]:
tail_leaves: list[Leaf] = []
body_leaves: list[Leaf] = []
@@ -854,6 +930,14 @@ features: Collection[Feature] = (),
omit: Collection[LeafID] = (),
) -> Iterator[Line]:
+ """Split line into many lines, starting with the last matching bracket pair.
+
+ If the split was by optional parentheses, attempt splitting without them, too.
+ `omit` is a collection of closing bracket IDs that shouldn't be considered for
+ this split.
+
+ Note: running this function modifies `bracket_depth` on the leaves of `line`.
+ """
rhs_result = _first_right_hand_split(line, omit=omit)
yield from _maybe_split_omitting_optional_parens(
rhs_result, line, mode, features=features, omit=omit
@@ -864,6 +948,12 @@ line: Line,
omit: Collection[LeafID] = (),
) -> RHSResult:
+ """Split the line into head, body, tail starting with the last bracket pair.
+
+ Note: this function should not have side effects. It's relied upon by
+ _maybe_split_omitting_optional_parens to get an opinion whether to prefer
+ splitting on the right side of an assignment statement.
+ """
tail_leaves: list[Leaf] = []
body_leaves: list[Leaf] = []
head_leaves: list[Leaf] = []
@@ -1028,6 +1118,10 @@ def _prefer_split_rhs_oop_over_rhs(
rhs_oop: RHSResult, rhs: RHSResult, mode: Mode
) -> bool:
+ """
+ Returns whether we should prefer the result from a split omitting optional parens
+ (rhs_oop) over the original (rhs).
+ """
# contains unsplittable type ignore
if (
rhs_oop.head.contains_unsplittable_type_ignore()
@@ -1097,6 +1191,19 @@
def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None:
+ """Raise :exc:`CannotSplit` if the last left- or right-hand split failed.
+
+ Do nothing otherwise.
+
+ A left- or right-hand split is based on a pair of brackets. Content before
+ (and including) the opening bracket is left on one line, content inside the
+ brackets is put on a separate line, and finally content starting with and
+ following the closing bracket is put on a separate line.
+
+ Those are called `head`, `body`, and `tail`, respectively. If the split
+ produced the same line (all content in `head`) or ended up with an empty `body`
+ and the `tail` is just the closing bracket, then it's considered failed.
+ """
tail_len = len(str(tail).strip())
if not body:
if tail_len == 0:
@@ -1152,6 +1259,15 @@ *,
component: _BracketSplitComponent,
) -> Line:
+ """Return a new line with given `leaves` and respective comments from `original`.
+
+ If it's the head component, brackets will be tracked so trailing commas are
+ respected.
+
+ If it's the body component, the result line is one-indented inside brackets and as
+ such has its first leaf's prefix normalized and a trailing comma added when
+ expected.
+ """
result = Line(mode=original.mode, depth=original.depth)
if component is _BracketSplitComponent.body:
result.inside_brackets = True
@@ -1186,6 +1302,10 @@
def dont_increase_indentation(split_func: Transformer) -> Transformer:
+ """Normalize prefix of the first leaf in every line returned by `split_func`.
+
+ This is a decorator over relevant split functions.
+ """
@wraps(split_func)
def split_wrapper(
@@ -1232,6 +1352,11 @@ def delimiter_split(
line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
+ """Split according to delimiters of the highest priority.
+
+ If the appropriate Features are given, the split will add trailing commas
+ also in function signatures and calls that contain `*` and `**`.
+ """
if len(line.leaves) == 0:
raise CannotSplit("Line empty") from None
last_leaf = line.leaves[-1]
@@ -1255,6 +1380,7 @@ trailing_comma_safe = True
def append_to_line(leaf: Leaf) -> Iterator[Line]:
+ """Append `leaf` to current line or to new line if appending impossible."""
nonlocal current_line
try:
current_line.append_safe(leaf, preformatted=True)
@@ -1316,6 +1442,7 @@ def standalone_comment_split(
line: Line, features: Collection[Feature], mode: Mode
) -> Iterator[Line]:
+ """Split standalone comments from the rest of the line."""
if not line.contains_standalone_comments():
raise CannotSplit("Line does not have any standalone comments")
@@ -1324,6 +1451,7 @@ )
def append_to_line(leaf: Leaf) -> Iterator[Line]:
+ """Append `leaf` to current line or to new line if appending impossible."""
nonlocal current_line
try:
current_line.append_safe(leaf, preformatted=True)
@@ -1348,6 +1476,14 @@ def normalize_invisible_parens(
node: Node, parens_after: set[str], *, mode: Mode, features: Collection[Feature]
) -> None:
+ """Make existing optional parentheses invisible or create new ones.
+
+ `parens_after` is a set of string leaf values immediately after which parens
+ should be put.
+
+ Standardizes on visible parentheses for single-element tuples, and keeps
+ existing visible parentheses for other tuples and generator expressions.
+ """
for pc in list_comments(node.prefix, is_endmarker=False, mode=mode):
if contains_fmt_directive(pc.value, FMT_OFF):
# This `node` has a prefix with `# fmt: off`, don't mess with parens.
@@ -1536,6 +1672,10 @@ def _maybe_wrap_cms_in_parens(
node: Node, mode: Mode, features: Collection[Feature]
) -> None:
+ """When enabled and safe, wrap the multiple context managers in invisible parens.
+
+ It is only safe when `features` contain Feature.PARENTHESIZED_CONTEXT_MANAGERS.
+ """
if (
Feature.PARENTHESIZED_CONTEXT_MANAGERS not in features
or len(node.children) <= 2
@@ -1574,6 +1714,7 @@ def remove_with_parens(
node: Node, parent: Node, mode: Mode, features: Collection[Feature]
) -> None:
+ """Recursively hide optional parens in `with` statements."""
# Removing all unnecessary parentheses in with statements in one pass is a tad
# complex as different variations of bracketed statements result in pretty
# different parse trees:
@@ -1619,6 +1760,11 @@
def _atom_has_magic_trailing_comma(node: LN, mode: Mode) -> bool:
+ """Check if an atom node has a magic trailing comma.
+
+ Returns True for single-element tuples with trailing commas like (a,),
+ which should be preserved to maintain their tuple type.
+ """
if not mode.magic_trailing_comma:
return False
@@ -1626,6 +1772,7 @@
def _is_atom_multiline(node: LN) -> bool:
+ """Check if an atom node is multiline (indicating intentional formatting)."""
if not isinstance(node, Node) or len(node.children) < 3:
return False
@@ -1647,6 +1794,12 @@ remove_brackets_around_comma: bool = False,
allow_star_expr: bool = False,
) -> bool:
+ """If it's safe, make the parens in the atom `node` invisible, recursively.
+ Additionally, remove repeated, adjacent invisible parens from the atom `node`
+ as they are redundant.
+
+ Returns whether the node should itself be wrapped in invisible parentheses.
+ """
if (
node.type not in (syms.atom, syms.expr)
or is_empty_tuple(node)
@@ -1747,6 +1900,7 @@
def should_split_line(line: Line, opening_bracket: Leaf) -> bool:
+ """Should `line` be immediately split with `delimiter_split()` after RHS?"""
if not (opening_bracket.parent and opening_bracket.value in "[{("):
return False
@@ -1773,6 +1927,15 @@
def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[set[LeafID]]:
+ """Generate sets of closing bracket IDs that should be omitted in a RHS.
+
+ Brackets can be omitted if the entire trailer up to and including
+ a preceding closing bracket fits in one line.
+
+ Yielded sets are cumulative (contain results of previous yields, too). First
+ set is empty, unless the line should explode, in which case bracket pairs until
+ the one that needs to explode are omitted.
+ """
omit: set[LeafID] = set()
if not line.magic_trailing_comma:
@@ -1882,4 +2045,4 @@ )
if all(is_line_short_enough(ln, mode=mode) for ln in second_opinion):
result = second_opinion
- return result
+ return result
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/linegen.py |
Generate NumPy-style docstrings | import io
import os
import sys
from collections.abc import Iterable, Iterator, Sequence
from functools import lru_cache
from pathlib import Path
from re import Pattern
from typing import TYPE_CHECKING, Any, Union
from mypy_extensions import mypyc_attr
from packaging.specifiers import InvalidSpecifier, Specifier, SpecifierSet
from packaging.version import InvalidVersion, Version
from pathspec import GitIgnoreSpec
from pathspec.patterns.gitignore import GitIgnorePatternError
if sys.version_info >= (3, 11):
try:
import tomllib
except ImportError:
# Help users on older alphas
if not TYPE_CHECKING:
import tomli as tomllib
else:
import tomli as tomllib
from black.handle_ipynb_magics import jupyter_dependencies_are_installed
from black.mode import TargetVersion
from black.output import err
from black.report import Report
if TYPE_CHECKING:
import colorama
@lru_cache
def _load_toml(path: Path | str) -> dict[str, Any]:
with open(path, "rb") as f:
return tomllib.load(f)
@lru_cache
def _cached_resolve(path: Path) -> Path:
return path.resolve()
@lru_cache
def find_project_root(
srcs: Sequence[str], stdin_filename: str | None = None
) -> tuple[Path, str]:
if stdin_filename is not None:
srcs = tuple(stdin_filename if s == "-" else s for s in srcs)
if not srcs:
srcs = [str(_cached_resolve(Path.cwd()))]
path_srcs = [_cached_resolve(Path(Path.cwd(), src)) for src in srcs]
# A list of lists of parents for each 'src'. 'src' is included as a
# "parent" of itself if it is a directory
src_parents = [
list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs
]
common_base = max(
set.intersection(*(set(parents) for parents in src_parents)),
key=lambda path: path.parts,
)
for directory in (common_base, *common_base.parents):
if (directory / ".git").exists():
return directory, ".git directory"
if (directory / ".hg").is_dir():
return directory, ".hg directory"
if (directory / "pyproject.toml").is_file():
pyproject_toml = _load_toml(directory / "pyproject.toml")
if "black" in pyproject_toml.get("tool", {}):
return directory, "pyproject.toml"
return directory, "file system root"
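# Illustrative sketch, not part of the original file: typical call and the
# shape of the result (the paths are made up).
#
#   root, method = find_project_root(("src/black", "tests"))
#   # e.g. (PosixPath("/home/user/black"), ".git directory")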
def find_pyproject_toml(
path_search_start: tuple[str, ...], stdin_filename: str | None = None
) -> str | None:
path_project_root, _ = find_project_root(path_search_start, stdin_filename)
path_pyproject_toml = path_project_root / "pyproject.toml"
if path_pyproject_toml.is_file():
return str(path_pyproject_toml)
try:
path_user_pyproject_toml = find_user_pyproject_toml()
return (
str(path_user_pyproject_toml)
if path_user_pyproject_toml.is_file()
else None
)
except (PermissionError, RuntimeError) as e:
# We do not have access to the user-level config directory, so ignore it.
err(f"Ignoring user configuration directory due to {e!r}")
return None
@mypyc_attr(patchable=True)
def parse_pyproject_toml(path_config: str) -> dict[str, Any]:
pyproject_toml = _load_toml(path_config)
config: dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
config = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
if "target_version" not in config:
inferred_target_version = infer_target_version(pyproject_toml)
if inferred_target_version is not None:
config["target_version"] = [v.name.lower() for v in inferred_target_version]
return config
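# Illustrative sketch, not part of the original file: the key normalization
# applied to [tool.black] entries, shown standalone (parse_pyproject_toml
# itself reads a real pyproject.toml from disk).
_example_raw = {"line-length": 100, "target-version": ["py311"]}
_example_normalized = {
    k.replace("--", "").replace("-", "_"): v for k, v in _example_raw.items()
}
assert _example_normalized == {"line_length": 100, "target_version": ["py311"]}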
def infer_target_version(
pyproject_toml: dict[str, Any],
) -> list[TargetVersion] | None:
project_metadata = pyproject_toml.get("project", {})
requires_python = project_metadata.get("requires-python", None)
if requires_python is not None:
try:
return parse_req_python_version(requires_python)
except InvalidVersion:
pass
try:
return parse_req_python_specifier(requires_python)
except (InvalidSpecifier, InvalidVersion):
pass
return None
def parse_req_python_version(requires_python: str) -> list[TargetVersion] | None:
version = Version(requires_python)
if version.release[0] != 3:
return None
try:
return [TargetVersion(version.release[1])]
except (IndexError, ValueError):
return None
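# Illustrative sketch, not part of the original file: expected behaviour of
# parse_req_python_version for a few inputs.
assert parse_req_python_version("3.11") == [TargetVersion.PY311]
assert parse_req_python_version("2.7") is None  # only Python 3 is supported
assert parse_req_python_version("3") is None  # no minor version to map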
def parse_req_python_specifier(requires_python: str) -> list[TargetVersion] | None:
specifier_set = strip_specifier_set(SpecifierSet(requires_python))
if not specifier_set:
return None
target_version_map = {f"3.{v.value}": v for v in TargetVersion}
compatible_versions: list[str] = list(specifier_set.filter(target_version_map))
if compatible_versions:
return [target_version_map[v] for v in compatible_versions]
return None
def strip_specifier_set(specifier_set: SpecifierSet) -> SpecifierSet:
specifiers = []
for s in specifier_set:
if "*" in str(s):
specifiers.append(s)
elif s.operator in ["~=", "==", ">=", "==="]:
version = Version(s.version)
stripped = Specifier(f"{s.operator}{version.major}.{version.minor}")
specifiers.append(stripped)
elif s.operator == ">":
version = Version(s.version)
if len(version.release) > 2:
s = Specifier(f">={version.major}.{version.minor}")
specifiers.append(s)
else:
specifiers.append(s)
return SpecifierSet(",".join(str(s) for s in specifiers))
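# Illustrative sketch, not part of the original file: minor/patch components
# are stripped so a bare feature release like "3.8" can still satisfy a
# requirement such as ">=3.8.1".
assert str(strip_specifier_set(SpecifierSet(">=3.8.1"))) == ">=3.8"
assert str(strip_specifier_set(SpecifierSet(">3.7.6"))) == ">=3.7"
assert str(strip_specifier_set(SpecifierSet("==3.10.*"))) == "==3.10.*"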
@lru_cache
def find_user_pyproject_toml() -> Path:
if sys.platform == "win32":
# Windows
user_config_path = Path.home() / ".black"
else:
config_root = os.environ.get("XDG_CONFIG_HOME", "~/.config")
user_config_path = Path(config_root).expanduser() / "black"
return _cached_resolve(user_config_path)
@lru_cache
def get_gitignore(root: Path) -> GitIgnoreSpec:
gitignore = root / ".gitignore"
lines: list[str] = []
if gitignore.is_file():
with gitignore.open(encoding="utf-8") as gf:
lines = gf.readlines()
try:
return GitIgnoreSpec.from_lines(lines)
except GitIgnorePatternError as e:
err(f"Could not parse {gitignore}: {e}")
raise
def resolves_outside_root_or_cannot_stat(
path: Path,
root: Path,
report: Report | None = None,
) -> bool:
try:
resolved_path = _cached_resolve(path)
except OSError as e:
if report:
report.path_ignored(path, f"cannot be read because {e}")
return True
try:
resolved_path.relative_to(root)
except ValueError:
if report:
report.path_ignored(path, f"is a symbolic link that points outside {root}")
return True
return False
def best_effort_relative_path(path: Path, root: Path) -> Path:
# Precondition: resolves_outside_root_or_cannot_stat(path, root) is False
try:
return path.absolute().relative_to(root)
except ValueError:
pass
root_parent = next((p for p in path.parents if _cached_resolve(p) == root), None)
if root_parent is not None:
return path.relative_to(root_parent)
# something adversarial, fallback to path guaranteed by precondition
return _cached_resolve(path).relative_to(root)
def _path_is_ignored(
root_relative_path: str,
root: Path,
gitignore_dict: dict[Path, GitIgnoreSpec],
) -> bool:
path = root / root_relative_path
# Note that this logic is sensitive to the ordering of gitignore_dict. Callers must
# ensure that gitignore_dict is ordered from least specific to most specific.
for gitignore_path, pattern in gitignore_dict.items():
try:
relative_path = path.relative_to(gitignore_path).as_posix()
if path.is_dir():
relative_path = relative_path + "/"
except ValueError:
break
if pattern.match_file(relative_path):
return True
return False
def path_is_excluded(
normalized_path: str,
pattern: Pattern[str] | None,
) -> bool:
match = pattern.search(normalized_path) if pattern else None
return bool(match and match.group(0))
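# Illustrative sketch, not part of the original file: exclusion is a plain
# regex search over the root-relative path; a missing or empty match means
# "not excluded".
import re as _re  # local alias used only by this sketch
assert path_is_excluded("/build/lib.py", _re.compile(r"/build/")) is True
assert path_is_excluded("/src/lib.py", _re.compile(r"/build/")) is False
assert path_is_excluded("/src/lib.py", None) is False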
def gen_python_files(
paths: Iterable[Path],
root: Path,
include: Pattern[str],
exclude: Pattern[str],
extend_exclude: Pattern[str] | None,
force_exclude: Pattern[str] | None,
report: Report,
gitignore_dict: dict[Path, GitIgnoreSpec] | None,
*,
verbose: bool,
quiet: bool,
) -> Iterator[Path]:
assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
for child in paths:
assert child.is_absolute()
root_relative_path = child.relative_to(root).as_posix()
# First ignore files matching .gitignore, if passed
if gitignore_dict and _path_is_ignored(
root_relative_path, root, gitignore_dict
):
report.path_ignored(child, "matches a .gitignore file content")
continue
# Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options.
root_relative_path = "/" + root_relative_path
if child.is_dir():
root_relative_path += "/"
if path_is_excluded(root_relative_path, exclude):
report.path_ignored(child, "matches the --exclude regular expression")
continue
if path_is_excluded(root_relative_path, extend_exclude):
report.path_ignored(
child, "matches the --extend-exclude regular expression"
)
continue
if path_is_excluded(root_relative_path, force_exclude):
report.path_ignored(child, "matches the --force-exclude regular expression")
continue
if resolves_outside_root_or_cannot_stat(child, root, report):
continue
if child.is_dir():
# If gitignore is None, gitignore usage is disabled, while a Falsey
# gitignore is when the directory doesn't have a .gitignore file.
if gitignore_dict is not None:
new_gitignore_dict = {
**gitignore_dict,
root / child: get_gitignore(child),
}
else:
new_gitignore_dict = None
yield from gen_python_files(
child.iterdir(),
root,
include,
exclude,
extend_exclude,
force_exclude,
report,
new_gitignore_dict,
verbose=verbose,
quiet=quiet,
)
elif child.is_file():
if child.suffix == ".ipynb" and not jupyter_dependencies_are_installed(
warn=verbose or not quiet
):
continue
include_match = include.search(root_relative_path) if include else True
if include_match:
yield child
def wrap_stream_for_windows(
f: io.TextIOWrapper,
) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]:
try:
from colorama.initialise import wrap_stream
except ImportError:
return f
else:
# Set `strip=False` to avoid needing to modify test_express_diff_with_color.
return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True) | --- +++ @@ -47,6 +47,21 @@ def find_project_root(
srcs: Sequence[str], stdin_filename: str | None = None
) -> tuple[Path, str]:
+ """Return a directory containing .git, .hg, or pyproject.toml.
+
+ pyproject.toml files are only considered if they contain a [tool.black]
+ section and are ignored otherwise.
+
+ That directory will be a common parent of all files and directories
+ passed in `srcs`.
+
+ If no directory in the tree contains a marker that would specify it's the
+ project root, the root of the file system is returned.
+
+ Returns a two-tuple with the first element as the project root path and
+ the second element as a string describing the method by which the
+ project root was discovered.
+ """
if stdin_filename is not None:
srcs = tuple(stdin_filename if s == "-" else s for s in srcs)
if not srcs:
@@ -83,6 +98,7 @@ def find_pyproject_toml(
path_search_start: tuple[str, ...], stdin_filename: str | None = None
) -> str | None:
+ """Find the absolute filepath to a pyproject.toml if it exists"""
path_project_root, _ = find_project_root(path_search_start, stdin_filename)
path_pyproject_toml = path_project_root / "pyproject.toml"
if path_pyproject_toml.is_file():
@@ -103,6 +119,10 @@
@mypyc_attr(patchable=True)
def parse_pyproject_toml(path_config: str) -> dict[str, Any]:
+ """Parse a pyproject toml file, pulling out relevant parts for Black.
+
+ If parsing fails, will raise a tomllib.TOMLDecodeError.
+ """
pyproject_toml = _load_toml(path_config)
config: dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
config = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
@@ -118,6 +138,13 @@ def infer_target_version(
pyproject_toml: dict[str, Any],
) -> list[TargetVersion] | None:
+ """Infer Black's target version from the project metadata in pyproject.toml.
+
+ Supports the PyPA standard format (PEP 621):
+ https://packaging.python.org/en/latest/specifications/declaring-project-metadata/#requires-python
+
+ If the target version cannot be inferred, returns None.
+ """
project_metadata = pyproject_toml.get("project", {})
requires_python = project_metadata.get("requires-python", None)
if requires_python is not None:
@@ -134,6 +161,11 @@
def parse_req_python_version(requires_python: str) -> list[TargetVersion] | None:
+ """Parse a version string (i.e. ``"3.7"``) to a list of TargetVersion.
+
+ If parsing fails, will raise a packaging.version.InvalidVersion error.
+ If the parsed version cannot be mapped to a valid TargetVersion, returns None.
+ """
version = Version(requires_python)
if version.release[0] != 3:
return None
@@ -144,6 +176,11 @@
def parse_req_python_specifier(requires_python: str) -> list[TargetVersion] | None:
+ """Parse a specifier string (i.e. ``">=3.7,<3.10"``) to a list of TargetVersion.
+
+ If parsing fails, will raise a packaging.specifiers.InvalidSpecifier error.
+ If the parsed specifier cannot be mapped to a valid TargetVersion, returns None.
+ """
specifier_set = strip_specifier_set(SpecifierSet(requires_python))
if not specifier_set:
return None
@@ -156,6 +193,11 @@
def strip_specifier_set(specifier_set: SpecifierSet) -> SpecifierSet:
+ """Strip minor versions for some specifiers in the specifier set.
+
+ For background on version specifiers, see PEP 440:
+ https://peps.python.org/pep-0440/#version-specifiers
+ """
specifiers = []
for s in specifier_set:
if "*" in str(s):
@@ -177,6 +219,15 @@
@lru_cache
def find_user_pyproject_toml() -> Path:
+ r"""Return the path to the top-level user configuration for black.
+
+ This looks for ~\.black on Windows and ~/.config/black on Linux and other
+ Unix systems.
+
+ May raise:
+ - RuntimeError: if the current user has no homedir
+ - PermissionError: if the current process cannot access the user's homedir
+ """
if sys.platform == "win32":
# Windows
user_config_path = Path.home() / ".black"
@@ -188,6 +239,7 @@
@lru_cache
def get_gitignore(root: Path) -> GitIgnoreSpec:
+ """Return a GitIgnoreSpec matching gitignore content if present."""
gitignore = root / ".gitignore"
lines: list[str] = []
if gitignore.is_file():
@@ -205,6 +257,10 @@ root: Path,
report: Report | None = None,
) -> bool:
+ """
+ Returns whether the path is a symbolic link that points outside the
+ root directory. Also returns True if we failed to resolve the path.
+ """
try:
resolved_path = _cached_resolve(path)
except OSError as e:
@@ -274,6 +330,14 @@ verbose: bool,
quiet: bool,
) -> Iterator[Path]:
+ """Generate all files under `paths` whose paths are not excluded by the
+ `exclude`, `extend_exclude`, or `force_exclude` regexes,
+ but are included by the `include` regex.
+
+ Symbolic links pointing outside of the `root` directory are ignored.
+
+ `report` is where output about exclusions goes.
+ """
assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
for child in paths:
@@ -345,10 +409,18 @@ def wrap_stream_for_windows(
f: io.TextIOWrapper,
) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]:
+ """
+ Wrap stream with colorama's wrap_stream so colors are shown on Windows.
+
+ If `colorama` is unavailable, the original stream is returned unmodified.
+ Otherwise, the `wrap_stream()` function determines whether the stream needs
+ to be wrapped for a Windows environment and will accordingly either return
+ an `AnsiToWin32` wrapper or the original stream.
+ """
try:
from colorama.initialise import wrap_stream
except ImportError:
return f
else:
# Set `strip=False` to avoid needing to modify test_express_diff_with_color.
- return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True)
+ return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True)
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/files.py |
Document functions with clear intent |
from blib2to3.pytree import Leaf
def format_hex(text: str) -> str:
before, after = text[:2], text[2:]
return f"{before}{after.upper()}"
def format_scientific_notation(text: str) -> str:
before, after = text.split("e")
sign = ""
if after.startswith("-"):
after = after[1:]
sign = "-"
elif after.startswith("+"):
after = after[1:]
before = format_float_or_int_string(before)
return f"{before}e{sign}{after}"
def format_complex_number(text: str) -> str:
number = text[:-1]
suffix = text[-1]
return f"{format_float_or_int_string(number)}{suffix}"
def format_float_or_int_string(text: str) -> str:
if "." not in text:
return text
before, after = text.split(".")
return f"{before or 0}.{after or 0}"
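# Illustrative sketch, not part of the original module: expected outputs of
# the formatting helpers defined above.
assert format_hex("0xabcdef") == "0xABCDEF"
assert format_scientific_notation("1e-5") == "1e-5"
assert format_scientific_notation("1.5e+10") == "1.5e10"
assert format_complex_number("10j") == "10j"
assert format_float_or_int_string("1.") == "1.0"
assert format_float_or_int_string(".5") == "0.5"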
def normalize_numeric_literal(leaf: Leaf) -> None:
text = leaf.value.lower()
if text.startswith(("0o", "0b")):
# Leave octal and binary literals alone.
pass
elif text.startswith("0x"):
text = format_hex(text)
elif "e" in text:
text = format_scientific_notation(text)
elif text.endswith("j"):
text = format_complex_number(text)
else:
text = format_float_or_int_string(text)
leaf.value = text | --- +++ @@ -1,13 +1,20 @@+"""
+Formatting numeric literals.
+"""
from blib2to3.pytree import Leaf
def format_hex(text: str) -> str:
+ """
+ Formats a hexadecimal string like "0x12B3"
+ """
before, after = text[:2], text[2:]
return f"{before}{after.upper()}"
def format_scientific_notation(text: str) -> str:
+ """Formats a numeric string utilizing scientific notation"""
before, after = text.split("e")
sign = ""
if after.startswith("-"):
@@ -20,12 +27,14 @@
def format_complex_number(text: str) -> str:
+ """Formats a complex string like `10j`"""
number = text[:-1]
suffix = text[-1]
return f"{format_float_or_int_string(number)}{suffix}"
def format_float_or_int_string(text: str) -> str:
+ """Formats a float string like "1.0"."""
if "." not in text:
return text
@@ -34,6 +43,9 @@
def normalize_numeric_literal(leaf: Leaf) -> None:
+ """Normalizes numeric (float, int, and complex) literals.
+
+ All letters used in the representation are normalized to lowercase."""
text = leaf.value.lower()
if text.startswith(("0o", "0b")):
# Leave octal and binary literals alone.
@@ -46,4 +58,4 @@ text = format_complex_number(text)
else:
text = format_float_or_int_string(text)
- leaf.value = text
+ leaf.value = text
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/numerics.py |
Add professional docstrings to my codebase |
from collections.abc import Iterator
from typing import Final, Generic, Literal, TypeGuard, TypeVar, Union
from mypy_extensions import mypyc_attr
from black.cache import CACHE_DIR
from black.mode import Mode, Preview
from black.strings import get_string_prefix, has_triple_quotes
from blib2to3 import pygram
from blib2to3.pgen2 import token
from blib2to3.pytree import NL, Leaf, Node, type_repr
pygram.initialize(CACHE_DIR)
syms: Final = pygram.python_symbols
# types
T = TypeVar("T")
LN = Union[Leaf, Node]
LeafID = int
NodeType = int
WHITESPACE: Final = {token.DEDENT, token.INDENT, token.NEWLINE}
STATEMENT: Final = {
syms.if_stmt,
syms.while_stmt,
syms.for_stmt,
syms.try_stmt,
syms.except_clause,
syms.with_stmt,
syms.funcdef,
syms.classdef,
syms.match_stmt,
syms.case_block,
}
STANDALONE_COMMENT: Final = 153
token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT"
LOGIC_OPERATORS: Final = {"and", "or"}
COMPARATORS: Final = {
token.LESS,
token.GREATER,
token.EQEQUAL,
token.NOTEQUAL,
token.LESSEQUAL,
token.GREATEREQUAL,
}
MATH_OPERATORS: Final = {
token.VBAR,
token.CIRCUMFLEX,
token.AMPER,
token.LEFTSHIFT,
token.RIGHTSHIFT,
token.PLUS,
token.MINUS,
token.STAR,
token.SLASH,
token.DOUBLESLASH,
token.PERCENT,
token.AT,
token.TILDE,
token.DOUBLESTAR,
}
STARS: Final = {token.STAR, token.DOUBLESTAR}
VARARGS_SPECIALS: Final = STARS | {token.SLASH}
VARARGS_PARENTS: Final = {
syms.arglist,
syms.argument, # double star in arglist
syms.trailer, # single argument to call
syms.typedargslist,
syms.varargslist, # lambdas
}
UNPACKING_PARENTS: Final = {
syms.atom, # single element of a list or set literal
syms.dictsetmaker,
syms.listmaker,
syms.testlist_gexp,
syms.testlist_star_expr,
syms.subject_expr,
syms.pattern,
}
TEST_DESCENDANTS: Final = {
syms.test,
syms.lambdef,
syms.or_test,
syms.and_test,
syms.not_test,
syms.comparison,
syms.star_expr,
syms.expr,
syms.xor_expr,
syms.and_expr,
syms.shift_expr,
syms.arith_expr,
syms.trailer,
syms.term,
syms.power,
syms.namedexpr_test,
}
TYPED_NAMES: Final = {syms.tname, syms.tname_star}
ASSIGNMENTS: Final = {
"=",
"+=",
"-=",
"*=",
"@=",
"/=",
"%=",
"&=",
"|=",
"^=",
"<<=",
">>=",
"**=",
"//=",
":",
}
IMPLICIT_TUPLE: Final = {syms.testlist, syms.testlist_star_expr, syms.exprlist}
BRACKET: Final = {
token.LPAR: token.RPAR,
token.LSQB: token.RSQB,
token.LBRACE: token.RBRACE,
}
OPENING_BRACKETS: Final = set(BRACKET.keys())
CLOSING_BRACKETS: Final = set(BRACKET.values())
BRACKETS: Final = OPENING_BRACKETS | CLOSING_BRACKETS
ALWAYS_NO_SPACE: Final = CLOSING_BRACKETS | {
token.COMMA,
STANDALONE_COMMENT,
token.FSTRING_MIDDLE,
token.FSTRING_END,
token.TSTRING_MIDDLE,
token.TSTRING_END,
token.BANG,
}
RARROW = 55
@mypyc_attr(allow_interpreted_subclasses=True)
class Visitor(Generic[T]):
def visit(self, node: LN) -> Iterator[T]:
if node.type < 256:
name = token.tok_name[node.type]
else:
name = str(type_repr(node.type))
# We explicitly branch on whether a visitor exists (instead of
# using self.visit_default as the default arg to getattr) in order
# to save needing to create a bound method object and so mypyc can
# generate a native call to visit_default.
visitf = getattr(self, f"visit_{name}", None)
if visitf:
yield from visitf(node)
else:
yield from self.visit_default(node)
def visit_default(self, node: LN) -> Iterator[T]:
if isinstance(node, Node):
for child in node.children:
yield from self.visit(child)
def whitespace(leaf: Leaf, *, complex_subscript: bool, mode: Mode) -> str:
NO: Final[str] = ""
SPACE: Final[str] = " "
DOUBLESPACE: Final[str] = " "
t = leaf.type
p = leaf.parent
v = leaf.value
if t in ALWAYS_NO_SPACE:
return NO
if t == token.COMMENT:
return DOUBLESPACE
assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}"
if t == token.COLON and p.type not in {
syms.subscript,
syms.subscriptlist,
syms.sliceop,
}:
return NO
if t == token.LBRACE and p.type in (
syms.fstring_replacement_field,
syms.tstring_replacement_field,
):
return NO
prev = leaf.prev_sibling
if not prev:
prevp = preceding_leaf(p)
if not prevp or prevp.type in OPENING_BRACKETS:
return NO
if t == token.COLON:
if prevp.type == token.COLON:
return NO
elif prevp.type != token.COMMA and not complex_subscript:
return NO
return SPACE
if prevp.type == token.EQUAL:
if prevp.parent:
if prevp.parent.type in {
syms.arglist,
syms.argument,
syms.parameters,
syms.varargslist,
}:
return NO
elif prevp.parent.type == syms.typedargslist:
# A bit hacky: if the equal sign has whitespace, it means we
# previously found it's a typed argument. So, we're using
# that, too.
return prevp.prefix
elif (
prevp.type == token.STAR
and parent_type(prevp) == syms.star_expr
and parent_type(prevp.parent) in (syms.subscriptlist, syms.tname_star)
):
# No space between typevar tuples or unpacking them.
return NO
elif prevp.type in VARARGS_SPECIALS:
if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS):
return NO
elif prevp.type == token.COLON:
if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}:
return SPACE if complex_subscript else NO
elif (
prevp.parent
and prevp.parent.type == syms.factor
and prevp.type in MATH_OPERATORS
):
return NO
elif prevp.type == token.AT and p.parent and p.parent.type == syms.decorator:
# no space in decorators
return NO
elif prev.type in OPENING_BRACKETS:
return NO
elif prev.type == token.BANG:
return NO
if p.type in {syms.parameters, syms.arglist}:
# untyped function signatures or calls
if not prev or prev.type != token.COMMA:
return NO
elif p.type == syms.varargslist:
# lambdas
if prev and prev.type != token.COMMA:
return NO
elif p.type == syms.typedargslist:
# typed function signatures
if not prev:
return NO
if t == token.EQUAL:
if prev.type not in TYPED_NAMES:
return NO
elif prev.type == token.EQUAL:
# A bit hacky: if the equal sign has whitespace, it means we
# previously found it's a typed argument. So, we're using that, too.
return prev.prefix
elif prev.type != token.COMMA:
return NO
elif p.type in TYPED_NAMES:
# type names
if not prev:
prevp = preceding_leaf(p)
if not prevp or prevp.type != token.COMMA:
return NO
elif p.type == syms.trailer:
# attributes and calls
if t == token.LPAR or t == token.RPAR:
return NO
if not prev:
if t == token.DOT or t == token.LSQB:
return NO
elif prev.type != token.COMMA:
return NO
elif p.type == syms.argument:
# single argument
if t == token.EQUAL:
return NO
if not prev:
prevp = preceding_leaf(p)
if not prevp or prevp.type == token.LPAR:
return NO
elif prev.type in {token.EQUAL} | VARARGS_SPECIALS:
return NO
elif p.type == syms.decorator:
# decorators
return NO
elif p.type == syms.dotted_name:
if prev:
return NO
prevp = preceding_leaf(p)
if not prevp or prevp.type == token.AT or prevp.type == token.DOT:
return NO
elif p.type == syms.classdef:
if t == token.LPAR:
return NO
if prev and prev.type == token.LPAR:
return NO
elif p.type in {syms.subscript, syms.sliceop}:
# indexing
if not prev:
assert p.parent is not None, "subscripts are always parented"
if p.parent.type == syms.subscriptlist:
return SPACE
return NO
elif t == token.COLONEQUAL or prev.type == token.COLONEQUAL:
return SPACE
elif not complex_subscript:
return NO
elif p.type == syms.atom:
if prev and t == token.DOT:
# dots, but not the first one.
return NO
elif p.type == syms.dictsetmaker:
# dict unpacking
if prev and prev.type == token.DOUBLESTAR:
return NO
elif p.type in {syms.factor, syms.star_expr}:
# unary ops
if not prev:
prevp = preceding_leaf(p)
if not prevp or prevp.type in OPENING_BRACKETS:
return NO
prevp_parent = prevp.parent
assert prevp_parent is not None
if prevp.type == token.COLON and prevp_parent.type in {
syms.subscript,
syms.sliceop,
}:
return NO
elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument:
return NO
elif t in {token.NAME, token.NUMBER, token.STRING}:
return NO
elif p.type == syms.import_from:
if t == token.DOT:
if prev and prev.type == token.DOT:
return NO
elif t == token.NAME:
if v == "import":
return SPACE
if prev and prev.type == token.DOT:
return NO
elif p.type == syms.sliceop:
return NO
elif p.type == syms.except_clause:
if t == token.STAR:
return NO
if Preview.simplify_power_operator_hugging in mode:
# Power operator hugging
if t == token.DOUBLESTAR and is_simple_exponentiation(p):
return NO
prevp = preceding_leaf(leaf)
if prevp and prevp.type == token.DOUBLESTAR:
if prevp.parent and is_simple_exponentiation(prevp.parent):
return NO
return SPACE
def make_simple_prefix(nl_count: int, form_feed: bool, empty_line: str = "\n") -> str:
if form_feed:
return (empty_line * (nl_count - 1)) + "\f" + empty_line
return empty_line * nl_count
def preceding_leaf(node: LN | None) -> Leaf | None:
while node:
res = node.prev_sibling
if res:
if isinstance(res, Leaf):
return res
try:
return list(res.leaves())[-1]
except IndexError:
return None
node = node.parent
return None
def prev_siblings_are(node: LN | None, tokens: list[NodeType | None]) -> bool:
if not tokens:
return True
if tokens[-1] is None:
return node is None
if not node:
return False
if node.type != tokens[-1]:
return False
return prev_siblings_are(node.prev_sibling, tokens[:-1])
def parent_type(node: LN | None) -> NodeType | None:
if node is None or node.parent is None:
return None
return node.parent.type
def child_towards(ancestor: Node, descendant: LN) -> LN | None:
node: LN | None = descendant
while node and node.parent != ancestor:
node = node.parent
return node
def replace_child(old_child: LN, new_child: LN) -> None:
parent = old_child.parent
if not parent:
return
child_idx = old_child.remove()
if child_idx is not None:
parent.insert_child(child_idx, new_child)
def container_of(leaf: Leaf) -> LN:
same_prefix = leaf.prefix
container: LN = leaf
while container:
parent = container.parent
if parent is None:
break
if parent.children[0].prefix != same_prefix:
break
if parent.type == syms.file_input:
break
if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS:
break
container = parent
return container
def first_leaf_of(node: LN) -> Leaf | None:
if isinstance(node, Leaf):
return node
if node.children:
return first_leaf_of(node.children[0])
else:
return None
def is_arith_like(node: LN) -> bool:
return node.type in {
syms.arith_expr,
syms.shift_expr,
syms.xor_expr,
syms.and_expr,
}
def is_simple_exponentiation(node: LN) -> bool:
def is_simple(node: LN) -> bool:
if isinstance(node, Leaf):
return node.type in (token.NAME, token.NUMBER, token.DOT, token.DOUBLESTAR)
elif node.type == syms.factor: # unary operators
return is_simple(node.children[1])
else:
return all(is_simple(child) for child in node.children)
return (
node.type == syms.power
and len(node.children) >= 3
and node.children[-2].type == token.DOUBLESTAR
and is_simple(node)
)
def is_docstring(node: NL) -> bool:
if isinstance(node, Leaf):
if node.type != token.STRING:
return False
prefix = get_string_prefix(node.value)
if set(prefix).intersection("bBfF"):
return False
if (
node.parent
and node.parent.type == syms.simple_stmt
and not node.parent.prev_sibling
and node.parent.parent
and node.parent.parent.type == syms.file_input
):
return True
if prev_siblings_are(
node.parent, [None, token.NEWLINE, token.INDENT, syms.simple_stmt]
):
return True
# Multiline docstring on the same line as the `def`.
if prev_siblings_are(node.parent, [syms.parameters, token.COLON, syms.simple_stmt]):
# `syms.parameters` is only used in funcdefs and async_funcdefs in the Python
# grammar. We're safe to return True without further checks.
return True
return False
def is_empty_tuple(node: LN) -> bool:
return (
node.type == syms.atom
and len(node.children) == 2
and node.children[0].type == token.LPAR
and node.children[1].type == token.RPAR
)
def is_one_tuple(node: LN) -> bool:
if node.type == syms.atom:
gexp = unwrap_singleton_parenthesis(node)
if gexp is None or gexp.type != syms.testlist_gexp:
return False
return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA
return (
node.type in IMPLICIT_TUPLE
and len(node.children) == 2
and node.children[1].type == token.COMMA
)
def is_tuple(node: LN) -> bool:
if node.type != syms.atom:
return False
gexp = unwrap_singleton_parenthesis(node)
if gexp is None or gexp.type != syms.testlist_gexp:
return False
return True
def is_tuple_containing_walrus(node: LN) -> bool:
if node.type != syms.atom:
return False
gexp = unwrap_singleton_parenthesis(node)
if gexp is None or gexp.type != syms.testlist_gexp:
return False
return any(child.type == syms.namedexpr_test for child in gexp.children)
def is_tuple_containing_star(node: LN) -> bool:
if node.type != syms.atom:
return False
gexp = unwrap_singleton_parenthesis(node)
if gexp is None or gexp.type != syms.testlist_gexp:
return False
return any(child.type == syms.star_expr for child in gexp.children)
def is_generator(node: LN) -> bool:
if node.type != syms.atom:
return False
gexp = unwrap_singleton_parenthesis(node)
if gexp is None or gexp.type != syms.testlist_gexp:
return False
return any(child.type == syms.old_comp_for for child in gexp.children)
def is_one_sequence_between(
opening: Leaf,
closing: Leaf,
leaves: list[Leaf],
brackets: tuple[int, int] = (token.LPAR, token.RPAR),
) -> bool:
if (opening.type, closing.type) != brackets:
return False
depth = closing.bracket_depth + 1
for _opening_index, leaf in enumerate(leaves):
if leaf is opening:
break
else:
return False
commas = 0
_opening_index += 1
for leaf in leaves[_opening_index:]:
if leaf is closing:
break
bracket_depth = leaf.bracket_depth
if bracket_depth == depth and leaf.type == token.COMMA:
commas += 1
if leaf.parent and leaf.parent.type in {
syms.arglist,
syms.typedargslist,
}:
commas += 1
break
return commas < 2
def is_walrus_assignment(node: LN) -> bool:
inner = unwrap_singleton_parenthesis(node)
return inner is not None and inner.type == syms.namedexpr_test
def is_simple_decorator_trailer(node: LN, last: bool = False) -> bool:
return node.type == syms.trailer and (
(
len(node.children) == 2
and node.children[0].type == token.DOT
and node.children[1].type == token.NAME
)
# last trailer can be an argument-less parentheses pair
or (
last
and len(node.children) == 2
and node.children[0].type == token.LPAR
and node.children[1].type == token.RPAR
)
# last trailer can be arguments
or (
last
and len(node.children) == 3
and node.children[0].type == token.LPAR
# and node.children[1].type == syms.argument
and node.children[2].type == token.RPAR
)
)
def is_simple_decorator_expression(node: LN) -> bool:
if node.type == token.NAME:
return True
if node.type == syms.power:
if node.children:
return (
node.children[0].type == token.NAME
and all(map(is_simple_decorator_trailer, node.children[1:-1]))
and (
len(node.children) < 2
or is_simple_decorator_trailer(node.children[-1], last=True)
)
)
return False
def is_yield(node: LN) -> bool:
if node.type == syms.yield_expr:
return True
if is_name_token(node) and node.value == "yield":
return True
if node.type != syms.atom:
return False
if len(node.children) != 3:
return False
lpar, expr, rpar = node.children
if lpar.type == token.LPAR and rpar.type == token.RPAR:
return is_yield(expr)
return False
def is_vararg(leaf: Leaf, within: set[NodeType]) -> bool:
if leaf.type not in VARARGS_SPECIALS or not leaf.parent:
return False
p = leaf.parent
if p.type == syms.star_expr:
# Star expressions are also used as assignment targets in extended
# iterable unpacking (PEP 3132). See what its parent is instead.
if not p.parent:
return False
p = p.parent
return p.type in within
def is_fstring(node: Node) -> bool:
return node.type == syms.fstring
def fstring_tstring_to_string(node: Node) -> Leaf:
string_without_prefix = str(node)[len(node.prefix) :]
string_leaf = Leaf(token.STRING, string_without_prefix, prefix=node.prefix)
string_leaf.lineno = node.get_lineno() or 0
return string_leaf
def is_multiline_string(node: LN) -> bool:
if isinstance(node, Node) and is_fstring(node):
leaf = fstring_tstring_to_string(node)
elif isinstance(node, Leaf):
leaf = node
else:
return False
return has_triple_quotes(leaf.value) and "\n" in leaf.value
def is_parent_function_or_class(node: Node) -> bool:
assert node.type in {syms.suite, syms.simple_stmt}
assert node.parent is not None
# Note this works for suites / simple_stmts in async def as well
return node.parent.type in {syms.funcdef, syms.classdef}
def is_stub_suite(node: Node) -> bool:
if node.parent is not None and not is_parent_function_or_class(node):
return False
# If there is a comment, we want to keep it.
if node.prefix.strip():
return False
if (
len(node.children) != 4
or node.children[0].type != token.NEWLINE
or node.children[1].type != token.INDENT
or node.children[3].type != token.DEDENT
):
return False
if node.children[3].prefix.strip():
return False
return is_stub_body(node.children[2])
def is_stub_body(node: LN) -> bool:
if not isinstance(node, Node) or node.type != syms.simple_stmt:
return False
if len(node.children) != 2:
return False
child = node.children[0]
return (
not child.prefix.strip()
and child.type == syms.atom
and len(child.children) == 3
and all(leaf == Leaf(token.DOT, ".") for leaf in child.children)
)
def is_atom_with_invisible_parens(node: LN) -> bool:
if isinstance(node, Leaf) or node.type != syms.atom:
return False
first, last = node.children[0], node.children[-1]
return (
isinstance(first, Leaf)
and first.type == token.LPAR
and first.value == ""
and isinstance(last, Leaf)
and last.type == token.RPAR
and last.value == ""
)
def is_empty_par(leaf: Leaf) -> bool:
return is_empty_lpar(leaf) or is_empty_rpar(leaf)
def is_empty_lpar(leaf: Leaf) -> bool:
return leaf.type == token.LPAR and leaf.value == ""
def is_empty_rpar(leaf: Leaf) -> bool:
return leaf.type == token.RPAR and leaf.value == ""
def is_import(leaf: Leaf) -> bool:
p = leaf.parent
t = leaf.type
v = leaf.value
return bool(
t == token.NAME
and (
(v == "import" and p and p.type == syms.import_name)
or (v == "from" and p and p.type == syms.import_from)
)
)
def is_with_or_async_with_stmt(leaf: Leaf) -> bool:
return bool(
leaf.type == token.NAME
and leaf.value == "with"
and leaf.parent
and leaf.parent.type == syms.with_stmt
) or bool(
leaf.type == token.ASYNC
and leaf.next_sibling
and leaf.next_sibling.type == syms.with_stmt
)
def is_async_stmt_or_funcdef(leaf: Leaf) -> bool:
return bool(
leaf.type == token.ASYNC
and leaf.parent
and leaf.parent.type in {syms.async_stmt, syms.async_funcdef}
)
def is_type_comment(leaf: Leaf, mode: Mode) -> bool:
t = leaf.type
v = leaf.value
return t in {token.COMMENT, STANDALONE_COMMENT} and is_type_comment_string(v, mode)
def is_type_comment_string(value: str, mode: Mode) -> bool:
return value.startswith("#") and value[1:].lstrip().startswith("type:")
def is_type_ignore_comment(leaf: Leaf, mode: Mode) -> bool:
t = leaf.type
v = leaf.value
return t in {token.COMMENT, STANDALONE_COMMENT} and is_type_ignore_comment_string(
v, mode
)
def is_type_ignore_comment_string(value: str, mode: Mode) -> bool:
return is_type_comment_string(value, mode) and value.split(":", 1)[
1
].lstrip().startswith("ignore")
def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None:
lpar = Leaf(token.LPAR, "(" if visible else "")
rpar = Leaf(token.RPAR, ")" if visible else "")
prefix = child.prefix
child.prefix = ""
index = child.remove() or 0
new_child = Node(syms.atom, [lpar, child, rpar])
new_child.prefix = prefix
parent.insert_child(index, new_child)
def unwrap_singleton_parenthesis(node: LN) -> LN | None:
if len(node.children) != 3:
return None
lpar, wrapped, rpar = node.children
if not (lpar.type == token.LPAR and rpar.type == token.RPAR):
return None
return wrapped
def ensure_visible(leaf: Leaf) -> None:
if leaf.type == token.LPAR:
leaf.value = "("
elif leaf.type == token.RPAR:
leaf.value = ")"
def is_name_token(nl: NL) -> TypeGuard[Leaf]:
return nl.type == token.NAME
def is_lpar_token(nl: NL) -> TypeGuard[Leaf]:
return nl.type == token.LPAR
def is_rpar_token(nl: NL) -> TypeGuard[Leaf]:
return nl.type == token.RPAR
def is_number_token(nl: NL) -> TypeGuard[Leaf]:
return nl.type == token.NUMBER
def get_annotation_type(leaf: Leaf) -> Literal["return", "param", None]:
ancestor = leaf.parent
while ancestor is not None:
if ancestor.prev_sibling and ancestor.prev_sibling.type == token.RARROW:
return "return"
if ancestor.parent and ancestor.parent.type == syms.tname:
return "param"
ancestor = ancestor.parent
return None
def is_part_of_annotation(leaf: Leaf) -> bool:
assert leaf.parent is not None
return get_annotation_type(leaf) is not None
def first_leaf(node: LN) -> Leaf | None:
if isinstance(node, Leaf):
return node
elif not node.children:
return None
else:
return first_leaf(node.children[0])
def last_leaf(node: LN) -> Leaf | None:
if isinstance(node, Leaf):
return node
elif not node.children:
return None
else:
return last_leaf(node.children[-1])
def furthest_ancestor_with_last_leaf(leaf: Leaf) -> LN:
node: LN = leaf
while node.parent and node.parent.children and node is node.parent.children[-1]:
node = node.parent
return node
def has_sibling_with_type(node: LN, type: int) -> bool:
# Check previous siblings
sibling = node.prev_sibling
while sibling is not None:
if sibling.type == type:
return True
sibling = sibling.prev_sibling
# Check next siblings
sibling = node.next_sibling
while sibling is not None:
if sibling.type == type:
return True
sibling = sibling.next_sibling
return False | --- +++ @@ -1,3 +1,6 @@+"""
+blib2to3 Node/Leaf transformation-related utility functions.
+"""
from collections.abc import Iterator
from typing import Final, Generic, Literal, TypeGuard, TypeVar, Union
@@ -141,8 +144,18 @@
@mypyc_attr(allow_interpreted_subclasses=True)
class Visitor(Generic[T]):
+ """Basic lib2to3 visitor that yields things of type `T` on `visit()`."""
def visit(self, node: LN) -> Iterator[T]:
+ """Main method to visit `node` and its children.
+
+ It tries to find a `visit_*()` method for the given `node.type`, like
+ `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects.
+ If no dedicated `visit_*()` method is found, chooses `visit_default()`
+ instead.
+
+ Then yields objects of type `T` from the selected visitor.
+ """
if node.type < 256:
name = token.tok_name[node.type]
else:
@@ -158,12 +171,18 @@ yield from self.visit_default(node)
def visit_default(self, node: LN) -> Iterator[T]:
+ """Default `visit_*()` implementation. Recurses to children of `node`."""
if isinstance(node, Node):
for child in node.children:
yield from self.visit(child)
def whitespace(leaf: Leaf, *, complex_subscript: bool, mode: Mode) -> str:
+ """Return whitespace prefix if needed for the given `leaf`.
+
+ `complex_subscript` signals whether the given leaf is part of a subscription
+ which has non-trivial arguments, like arithmetic expressions or function calls.
+ """
NO: Final[str] = ""
SPACE: Final[str] = " "
DOUBLESPACE: Final[str] = " "
@@ -410,12 +429,14 @@
def make_simple_prefix(nl_count: int, form_feed: bool, empty_line: str = "\n") -> str:
+ """Generate a normalized prefix string."""
if form_feed:
return (empty_line * (nl_count - 1)) + "\f" + empty_line
return empty_line * nl_count
def preceding_leaf(node: LN | None) -> Leaf | None:
+ """Return the first leaf that precedes `node`, if any."""
while node:
res = node.prev_sibling
if res:
@@ -433,6 +454,10 @@
def prev_siblings_are(node: LN | None, tokens: list[NodeType | None]) -> bool:
+ """Return if the `node` and its previous siblings match types against the provided
+ list of tokens; the provided `node` has its type matched against the last element in
+ the list. `None` can be used as the first element to declare that the start of the
+ list is anchored at the start of its parent's children."""
if not tokens:
return True
if tokens[-1] is None:
@@ -445,6 +470,12 @@
def parent_type(node: LN | None) -> NodeType | None:
+ """
+ Returns:
+ @node.parent.type, if @node is not None and has a parent.
+ OR
+ None, otherwise.
+ """
if node is None or node.parent is None:
return None
@@ -452,6 +483,7 @@
def child_towards(ancestor: Node, descendant: LN) -> LN | None:
+ """Return the child of `ancestor` that contains `descendant`."""
node: LN | None = descendant
while node and node.parent != ancestor:
node = node.parent
@@ -459,6 +491,13 @@
def replace_child(old_child: LN, new_child: LN) -> None:
+ """
+ Side Effects:
+ * If @old_child.parent is set, replace @old_child with @new_child in
+ @old_child's underlying Node structure.
+ OR
+ * Otherwise, this function does nothing.
+ """
parent = old_child.parent
if not parent:
return
@@ -469,6 +508,10 @@
def container_of(leaf: Leaf) -> LN:
+ """Return `leaf` or one of its ancestors that is the topmost container of it.
+
+ By "container" we mean a node where `leaf` is the very first child.
+ """
same_prefix = leaf.prefix
container: LN = leaf
while container:
@@ -490,6 +533,7 @@
def first_leaf_of(node: LN) -> Leaf | None:
+ """Returns the first leaf of the node tree."""
if isinstance(node, Leaf):
return node
if node.children:
@@ -499,6 +543,7 @@
def is_arith_like(node: LN) -> bool:
+ """Whether node is an arithmetic or a binary arithmetic expression"""
return node.type in {
syms.arith_expr,
syms.shift_expr,
@@ -508,6 +553,7 @@
def is_simple_exponentiation(node: LN) -> bool:
+ """Whether whitespace around `**` should be removed."""
def is_simple(node: LN) -> bool:
if isinstance(node, Leaf):
@@ -558,6 +604,7 @@
def is_empty_tuple(node: LN) -> bool:
+ """Return True if `node` holds an empty tuple."""
return (
node.type == syms.atom
and len(node.children) == 2
@@ -567,6 +614,7 @@
def is_one_tuple(node: LN) -> bool:
+ """Return True if `node` holds a tuple with one element, with or without parens."""
if node.type == syms.atom:
gexp = unwrap_singleton_parenthesis(node)
if gexp is None or gexp.type != syms.testlist_gexp:
@@ -582,6 +630,7 @@
def is_tuple(node: LN) -> bool:
+ """Return True if `node` holds a tuple."""
if node.type != syms.atom:
return False
gexp = unwrap_singleton_parenthesis(node)
@@ -592,6 +641,7 @@
def is_tuple_containing_walrus(node: LN) -> bool:
+ """Return True if `node` holds a tuple that contains a walrus operator."""
if node.type != syms.atom:
return False
gexp = unwrap_singleton_parenthesis(node)
@@ -602,6 +652,7 @@
def is_tuple_containing_star(node: LN) -> bool:
+ """Return True if `node` holds a tuple that contains a star operator."""
if node.type != syms.atom:
return False
gexp = unwrap_singleton_parenthesis(node)
@@ -612,6 +663,7 @@
def is_generator(node: LN) -> bool:
+ """Return True if `node` holds a generator."""
if node.type != syms.atom:
return False
gexp = unwrap_singleton_parenthesis(node)
@@ -627,6 +679,7 @@ leaves: list[Leaf],
brackets: tuple[int, int] = (token.LPAR, token.RPAR),
) -> bool:
+ """Return True if content between `opening` and `closing` is a one-sequence."""
if (opening.type, closing.type) != brackets:
return False
@@ -658,11 +711,13 @@
def is_walrus_assignment(node: LN) -> bool:
+ """Return True iff `node` is of the shape ( test := test )"""
inner = unwrap_singleton_parenthesis(node)
return inner is not None and inner.type == syms.namedexpr_test
def is_simple_decorator_trailer(node: LN, last: bool = False) -> bool:
+ """Return True iff `node` is a trailer valid in a simple decorator"""
return node.type == syms.trailer and (
(
len(node.children) == 2
@@ -688,6 +743,14 @@
def is_simple_decorator_expression(node: LN) -> bool:
+ """Return True iff `node` could be a 'dotted name' decorator
+
+ This function takes the node of the 'namedexpr_test' of the new decorator
+ grammar and tests if it would be valid under the old decorator grammar.
+
+ The old grammar was: decorator: @ dotted_name [arguments] NEWLINE
+ The new grammar is : decorator: @ namedexpr_test NEWLINE
+ """
if node.type == token.NAME:
return True
if node.type == syms.power:
@@ -704,6 +767,7 @@
def is_yield(node: LN) -> bool:
+ """Return True if `node` holds a `yield` or `yield from` expression."""
if node.type == syms.yield_expr:
return True
@@ -724,6 +788,13 @@
def is_vararg(leaf: Leaf, within: set[NodeType]) -> bool:
+ """Return True if `leaf` is a star or double star in a vararg or kwarg.
+
+ If `within` includes VARARGS_PARENTS, this applies to function signatures.
+ If `within` includes UNPACKING_PARENTS, it applies to right hand-side
+ extended iterable unpacking (PEP 3132) and additional unpacking
+ generalizations (PEP 448).
+ """
if leaf.type not in VARARGS_SPECIALS or not leaf.parent:
return False
@@ -740,10 +811,12 @@
def is_fstring(node: Node) -> bool:
+ """Return True if the node is an f-string"""
return node.type == syms.fstring
def fstring_tstring_to_string(node: Node) -> Leaf:
+ """Converts an fstring or tstring node back to a string node."""
string_without_prefix = str(node)[len(node.prefix) :]
string_leaf = Leaf(token.STRING, string_without_prefix, prefix=node.prefix)
string_leaf.lineno = node.get_lineno() or 0
@@ -751,6 +824,7 @@
def is_multiline_string(node: LN) -> bool:
+ """Return True if `leaf` is a multiline string that actually spans many lines."""
if isinstance(node, Node) and is_fstring(node):
leaf = fstring_tstring_to_string(node)
elif isinstance(node, Leaf):
@@ -769,6 +843,7 @@
def is_stub_suite(node: Node) -> bool:
+ """Return True if `node` is a suite with a stub body."""
if node.parent is not None and not is_parent_function_or_class(node):
return False
@@ -791,6 +866,7 @@
def is_stub_body(node: LN) -> bool:
+ """Return True if `node` is a simple statement containing an ellipsis."""
if not isinstance(node, Node) or node.type != syms.simple_stmt:
return False
@@ -807,6 +883,9 @@
def is_atom_with_invisible_parens(node: LN) -> bool:
+ """Given a `LN`, determines whether it's an atom `node` with invisible
+ parens. Useful in dedupe-ing and normalizing parens.
+ """
if isinstance(node, Leaf) or node.type != syms.atom:
return False
@@ -834,6 +913,7 @@
def is_import(leaf: Leaf) -> bool:
+ """Return True if the given leaf starts an import statement."""
p = leaf.parent
t = leaf.type
v = leaf.value
@@ -847,6 +927,7 @@
def is_with_or_async_with_stmt(leaf: Leaf) -> bool:
+ """Return True if the given leaf starts a with or async with statement."""
return bool(
leaf.type == token.NAME
and leaf.value == "with"
@@ -860,6 +941,11 @@
def is_async_stmt_or_funcdef(leaf: Leaf) -> bool:
+ """Return True if the given leaf starts an async def/for/with statement.
+
+ Note that `async def` can be either an `async_stmt` or `async_funcdef`,
+ the latter is used when it has decorators.
+ """
return bool(
leaf.type == token.ASYNC
and leaf.parent
@@ -868,6 +954,10 @@
def is_type_comment(leaf: Leaf, mode: Mode) -> bool:
+ """Return True if the given leaf is a type comment. This function should only
+ be used for general type comments (excluding ignore annotations, which should
+ use `is_type_ignore_comment`). Note that general type comments are no longer
+ used in modern versions of Python; this function may be deprecated in the future."""
t = leaf.type
v = leaf.value
return t in {token.COMMENT, STANDALONE_COMMENT} and is_type_comment_string(v, mode)
@@ -878,6 +968,7 @@
def is_type_ignore_comment(leaf: Leaf, mode: Mode) -> bool:
+ """Return True if the given leaf is a type comment with ignore annotation."""
t = leaf.type
v = leaf.value
return t in {token.COMMENT, STANDALONE_COMMENT} and is_type_ignore_comment_string(
@@ -886,12 +977,21 @@
def is_type_ignore_comment_string(value: str, mode: Mode) -> bool:
+ """Return True if the given string match with type comment with
+ ignore annotation."""
return is_type_comment_string(value, mode) and value.split(":", 1)[
1
].lstrip().startswith("ignore")
def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None:
+ """Wrap `child` in parentheses.
+
+ This replaces `child` with an atom holding the parentheses and the old
+ child. That requires moving the prefix.
+
+ If `visible` is False, the leaves will be valueless (and thus invisible).
+ """
lpar = Leaf(token.LPAR, "(" if visible else "")
rpar = Leaf(token.RPAR, ")" if visible else "")
prefix = child.prefix
@@ -903,6 +1003,9 @@
def unwrap_singleton_parenthesis(node: LN) -> LN | None:
+ """Returns `wrapped` if `node` is of the shape ( wrapped ).
+
+ Parentheses can be optional. Returns None otherwise."""
if len(node.children) != 3:
return None
@@ -914,6 +1017,11 @@
def ensure_visible(leaf: Leaf) -> None:
+ """Make sure parentheses are visible.
+
+ They could be invisible as part of some statements (see
+ :func:`normalize_invisible_parens` and :func:`visit_import_from`).
+ """
if leaf.type == token.LPAR:
leaf.value = "("
elif leaf.type == token.RPAR:
@@ -937,6 +1045,7 @@
def get_annotation_type(leaf: Leaf) -> Literal["return", "param", None]:
+ """Returns the type of annotation this leaf is part of, if any."""
ancestor = leaf.parent
while ancestor is not None:
if ancestor.prev_sibling and ancestor.prev_sibling.type == token.RARROW:
@@ -948,11 +1057,13 @@
def is_part_of_annotation(leaf: Leaf) -> bool:
+ """Returns whether this leaf is part of a type annotation."""
assert leaf.parent is not None
return get_annotation_type(leaf) is not None
def first_leaf(node: LN) -> Leaf | None:
+ """Returns the first leaf of the ancestor node."""
if isinstance(node, Leaf):
return node
elif not node.children:
@@ -962,6 +1073,7 @@
def last_leaf(node: LN) -> Leaf | None:
+ """Returns the last leaf of the ancestor node."""
if isinstance(node, Leaf):
return node
elif not node.children:
@@ -971,6 +1083,7 @@
def furthest_ancestor_with_last_leaf(leaf: Leaf) -> LN:
+ """Returns the furthest ancestor that has this leaf node as the last leaf."""
node: LN = leaf
while node.parent and node.parent.children and node is node.parent.children[-1]:
node = node.parent
@@ -992,4 +1105,4 @@ return True
sibling = sibling.next_sibling
- return False+ return False
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/nodes.py |
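For context on the nodes.py row above: its Visitor class dispatches by looking up a visit_<name> method for the node's type and falling back to visit_default, which recurses into children. A minimal standalone sketch of that dispatch pattern (hypothetical MiniNode/MiniVisitor/NameCollector names; not the blib2to3 Leaf/Node types the real class operates on):

from __future__ import annotations

from collections.abc import Iterator


class MiniNode:
    def __init__(self, kind: str, children: list[MiniNode] | None = None) -> None:
        self.kind = kind
        self.children = children or []


class MiniVisitor:
    def visit(self, node: MiniNode) -> Iterator[str]:
        # Dispatch to visit_<kind> if defined, else recurse via visit_default,
        # mirroring the getattr-based lookup in nodes.Visitor.visit().
        visitor = getattr(self, f"visit_{node.kind}", self.visit_default)
        yield from visitor(node)

    def visit_default(self, node: MiniNode) -> Iterator[str]:
        for child in node.children:
            yield from self.visit(child)


class NameCollector(MiniVisitor):
    def visit_name(self, node: MiniNode) -> Iterator[str]:
        yield node.kind


tree = MiniNode("module", [MiniNode("name"), MiniNode("stmt", [MiniNode("name")])])
assert list(NameCollector().visit(tree)) == ["name", "name"]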
Add standardized docstrings across the file | import itertools
import math
from collections.abc import Callable, Iterator, Sequence
from dataclasses import dataclass, field
from typing import Optional, TypeVar, Union, cast
from black.brackets import COMMA_PRIORITY, DOT_PRIORITY, BracketTracker
from black.mode import Mode
from black.nodes import (
BRACKETS,
CLOSING_BRACKETS,
OPENING_BRACKETS,
STANDALONE_COMMENT,
TEST_DESCENDANTS,
child_towards,
is_docstring,
is_import,
is_multiline_string,
is_one_sequence_between,
is_type_comment,
is_type_ignore_comment,
is_with_or_async_with_stmt,
make_simple_prefix,
replace_child,
syms,
whitespace,
)
from black.strings import str_width
from blib2to3.pgen2 import token
from blib2to3.pytree import Leaf, Node
# types
T = TypeVar("T")
Index = int
LeafID = int
LN = Union[Leaf, Node]
@dataclass
class Line:
mode: Mode = field(repr=False)
depth: int = 0
leaves: list[Leaf] = field(default_factory=list)
# keys ordered like `leaves`
comments: dict[LeafID, list[Leaf]] = field(default_factory=dict)
bracket_tracker: BracketTracker = field(default_factory=BracketTracker)
inside_brackets: bool = False
should_split_rhs: bool = False
magic_trailing_comma: Leaf | None = None
def append(
self, leaf: Leaf, preformatted: bool = False, track_bracket: bool = False
) -> None:
has_value = (
leaf.type in BRACKETS
# empty fstring and tstring middles must not be truncated
or leaf.type in (token.FSTRING_MIDDLE, token.TSTRING_MIDDLE)
or bool(leaf.value.strip())
)
if not has_value:
return
if leaf.type == token.COLON and self.is_class_paren_empty:
del self.leaves[-2:]
if self.leaves and not preformatted:
# Note: at this point leaf.prefix should be empty except for
# imports, for which we only preserve newlines.
leaf.prefix += whitespace(
leaf,
complex_subscript=self.is_complex_subscript(leaf),
mode=self.mode,
)
if self.inside_brackets or not preformatted or track_bracket:
self.bracket_tracker.mark(leaf)
if self.mode.magic_trailing_comma:
if self.has_magic_trailing_comma(leaf):
self.magic_trailing_comma = leaf
elif self.has_magic_trailing_comma(leaf):
self.remove_trailing_comma()
if not self.append_comment(leaf):
self.leaves.append(leaf)
def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
if (
self.bracket_tracker.depth == 0
or self.bracket_tracker.any_open_for_or_lambda()
):
if self.is_comment:
raise ValueError("cannot append to standalone comments")
if self.leaves and leaf.type == STANDALONE_COMMENT:
raise ValueError(
"cannot append standalone comments to a populated line"
)
self.append(leaf, preformatted=preformatted)
@property
def is_comment(self) -> bool:
return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
@property
def is_decorator(self) -> bool:
return bool(self) and self.leaves[0].type == token.AT
@property
def is_import(self) -> bool:
return bool(self) and is_import(self.leaves[0])
@property
def is_with_or_async_with_stmt(self) -> bool:
return bool(self) and is_with_or_async_with_stmt(self.leaves[0])
@property
def is_class(self) -> bool:
return (
bool(self)
and self.leaves[0].type == token.NAME
and self.leaves[0].value == "class"
)
@property
def is_stub_class(self) -> bool:
return self.is_class and self.leaves[-3:] == [
Leaf(token.DOT, ".") for _ in range(3)
]
@property
def is_def(self) -> bool:
try:
first_leaf = self.leaves[0]
except IndexError:
return False
try:
second_leaf: Leaf | None = self.leaves[1]
except IndexError:
second_leaf = None
return (first_leaf.type == token.NAME and first_leaf.value == "def") or (
first_leaf.type == token.ASYNC
and second_leaf is not None
and second_leaf.type == token.NAME
and second_leaf.value == "def"
)
@property
def is_stub_def(self) -> bool:
return self.is_def and self.leaves[-4:] == [Leaf(token.COLON, ":")] + [
Leaf(token.DOT, ".") for _ in range(3)
]
@property
def is_class_paren_empty(self) -> bool:
return (
bool(self)
and len(self.leaves) == 4
and self.is_class
and self.leaves[2].type == token.LPAR
and self.leaves[2].value == "("
and self.leaves[3].type == token.RPAR
and self.leaves[3].value == ")"
)
@property
def _is_triple_quoted_string(self) -> bool:
if not self or self.leaves[0].type != token.STRING:
return False
value = self.leaves[0].value
if value.startswith(('"""', "'''")):
return True
if value.startswith(("r'''", 'r"""', "R'''", 'R"""')):
return True
return False
@property
def is_docstring(self) -> bool:
return bool(self) and is_docstring(self.leaves[0])
@property
def is_chained_assignment(self) -> bool:
return [leaf.type for leaf in self.leaves].count(token.EQUAL) > 1
@property
def opens_block(self) -> bool:
if len(self.leaves) == 0:
return False
return self.leaves[-1].type == token.COLON
def is_fmt_pass_converted(
self, *, first_leaf_matches: Callable[[Leaf], bool] | None = None
) -> bool:
if len(self.leaves) != 1:
return False
leaf = self.leaves[0]
if (
leaf.type != STANDALONE_COMMENT
or leaf.fmt_pass_converted_first_leaf is None
):
return False
return first_leaf_matches is None or first_leaf_matches(
leaf.fmt_pass_converted_first_leaf
)
def contains_standalone_comments(self) -> bool:
for leaf in self.leaves:
if leaf.type == STANDALONE_COMMENT:
return True
return False
def contains_implicit_multiline_string_with_comments(self) -> bool:
for leaf_type, leaf_group_iterator in itertools.groupby(
self.leaves, lambda leaf: leaf.type
):
if leaf_type != token.STRING:
continue
leaf_list = list(leaf_group_iterator)
if len(leaf_list) == 1:
continue
for leaf in leaf_list:
if self.comments_after(leaf):
return True
return False
def contains_uncollapsable_type_comments(self) -> bool:
ignored_ids = set()
try:
last_leaf = self.leaves[-1]
ignored_ids.add(id(last_leaf))
if last_leaf.type == token.COMMA or (
last_leaf.type == token.RPAR and not last_leaf.value
):
# When trailing commas or optional parens are inserted by Black for
# consistency, comments after the previous last element are not moved
# (they don't have to, rendering will still be correct). So we ignore
# trailing commas and invisible parens.
last_leaf = self.leaves[-2]
ignored_ids.add(id(last_leaf))
except IndexError:
return False
# A type comment is uncollapsable if it is attached to a leaf
# that isn't at the end of the line (since that could cause it
# to get associated to a different argument) or if there are
# comments before it (since that could cause it to get hidden
# behind a comment).
comment_seen = False
for leaf_id, comments in self.comments.items():
for comment in comments:
if is_type_comment(comment, mode=self.mode):
if comment_seen or (
not is_type_ignore_comment(comment, mode=self.mode)
and leaf_id not in ignored_ids
):
return True
comment_seen = True
return False
def contains_unsplittable_type_ignore(self) -> bool:
if not self.leaves:
return False
# If a 'type: ignore' is attached to the end of a line, we
# can't split the line, because we can't know which of the
# subexpressions the ignore was meant to apply to.
#
# We only want this to apply to actual physical lines from the
# original source, though: we don't want the presence of a
# 'type: ignore' at the end of a multiline expression to
# justify pushing it all onto one line. Thus we
# (unfortunately) need to check the actual source lines and
# only report an unsplittable 'type: ignore' if this line was
# one line in the original code.
# Grab the first and last line numbers, skipping generated leaves
first_line = next((leaf.lineno for leaf in self.leaves if leaf.lineno != 0), 0)
last_line = next(
(leaf.lineno for leaf in reversed(self.leaves) if leaf.lineno != 0), 0
)
if first_line == last_line:
# We look at the last two leaves since a comma or an
# invisible paren could have been added at the end of the
# line.
for node in self.leaves[-2:]:
for comment in self.comments.get(id(node), []):
if is_type_ignore_comment(comment, mode=self.mode):
return True
return False
def contains_multiline_strings(self) -> bool:
return any(is_multiline_string(leaf) for leaf in self.leaves)
def has_magic_trailing_comma(self, closing: Leaf) -> bool:
if not (
closing.type in CLOSING_BRACKETS
and self.leaves
and self.leaves[-1].type == token.COMMA
):
return False
if closing.type == token.RBRACE:
return True
if closing.type == token.RSQB:
if (
closing.parent is not None
and closing.parent.type == syms.trailer
and closing.opening_bracket is not None
and is_one_sequence_between(
closing.opening_bracket,
closing,
self.leaves,
brackets=(token.LSQB, token.RSQB),
)
):
assert closing.prev_sibling is not None
assert closing.prev_sibling.type == syms.subscriptlist
return False
return True
if self.is_import:
return True
if closing.opening_bracket is not None and not is_one_sequence_between(
closing.opening_bracket, closing, self.leaves
):
return True
return False
def append_comment(self, comment: Leaf) -> bool:
if (
comment.type == STANDALONE_COMMENT
and self.bracket_tracker.any_open_brackets()
):
comment.prefix = ""
return False
if comment.type != token.COMMENT:
return False
if not self.leaves:
comment.type = STANDALONE_COMMENT
comment.prefix = ""
return False
last_leaf = self.leaves[-1]
if (
last_leaf.type == token.RPAR
and not last_leaf.value
and last_leaf.parent
and len(list(last_leaf.parent.leaves())) <= 3
and not is_type_comment(comment, mode=self.mode)
):
# Comments on an optional parens wrapping a single leaf should belong to
# the wrapped node except if it's a type comment. Pinning the comment like
# this avoids unstable formatting caused by comment migration.
if len(self.leaves) < 2:
comment.type = STANDALONE_COMMENT
comment.prefix = ""
return False
last_leaf = self.leaves[-2]
self.comments.setdefault(id(last_leaf), []).append(comment)
return True
def comments_after(self, leaf: Leaf) -> list[Leaf]:
return self.comments.get(id(leaf), [])
def remove_trailing_comma(self) -> None:
trailing_comma = self.leaves.pop()
trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
self.comments.setdefault(id(self.leaves[-1]), []).extend(
trailing_comma_comments
)
def is_complex_subscript(self, leaf: Leaf) -> bool:
open_lsqb = self.bracket_tracker.get_open_lsqb()
if open_lsqb is None:
return False
subscript_start = open_lsqb.next_sibling
if isinstance(subscript_start, Node):
if subscript_start.type == syms.listmaker:
return False
if subscript_start.type == syms.subscriptlist:
subscript_start = child_towards(subscript_start, leaf)
return subscript_start is not None and any(
n.type in TEST_DESCENDANTS for n in subscript_start.pre_order()
)
def enumerate_with_length(
self, is_reversed: bool = False
) -> Iterator[tuple[Index, Leaf, int]]:
op = cast(
Callable[[Sequence[Leaf]], Iterator[tuple[Index, Leaf]]],
enumerate_reversed if is_reversed else enumerate,
)
for index, leaf in op(self.leaves):
length = len(leaf.prefix) + len(leaf.value)
if "\n" in leaf.value:
return # Multiline strings, we can't continue.
for comment in self.comments_after(leaf):
length += len(comment.value)
yield index, leaf, length
def clone(self) -> "Line":
return Line(
mode=self.mode,
depth=self.depth,
inside_brackets=self.inside_brackets,
should_split_rhs=self.should_split_rhs,
magic_trailing_comma=self.magic_trailing_comma,
)
def __str__(self) -> str:
if not self:
return "\n"
indent = " " * self.depth
leaves = iter(self.leaves)
first = next(leaves)
res = f"{first.prefix}{indent}{first.value}"
res += "".join(str(leaf) for leaf in leaves)
comments_iter = itertools.chain.from_iterable(self.comments.values())
comments = [str(comment) for comment in comments_iter]
res += "".join(comments)
return res + "\n"
def __bool__(self) -> bool:
return bool(self.leaves or self.comments)
@dataclass
class RHSResult:
head: Line
body: Line
tail: Line
opening_bracket: Leaf
closing_bracket: Leaf
@dataclass
class LinesBlock:
mode: Mode
previous_block: Optional["LinesBlock"]
original_line: Line
before: int = 0
content_lines: list[str] = field(default_factory=list)
after: int = 0
form_feed: bool = False
def all_lines(self) -> list[str]:
empty_line = str(Line(mode=self.mode))
prefix = make_simple_prefix(self.before, self.form_feed, empty_line)
return [prefix] + self.content_lines + [empty_line * self.after]
@dataclass
class EmptyLineTracker:
mode: Mode
previous_line: Line | None = None
previous_block: LinesBlock | None = None
previous_defs: list[Line] = field(default_factory=list)
semantic_leading_comment: LinesBlock | None = None
def maybe_empty_lines(self, current_line: Line) -> LinesBlock:
form_feed = (
current_line.depth == 0
and bool(current_line.leaves)
and "\f\n" in current_line.leaves[0].prefix
)
before, after = self._maybe_empty_lines(current_line)
previous_after = self.previous_block.after if self.previous_block else 0
before = max(0, before - previous_after)
# Always have one empty line after a module docstring
if self._line_is_module_docstring(current_line):
before = 1
block = LinesBlock(
mode=self.mode,
previous_block=self.previous_block,
original_line=current_line,
before=before,
after=after,
form_feed=form_feed,
)
# Maintain the semantic_leading_comment state.
if current_line.is_comment:
if self.previous_line is None or (
not self.previous_line.is_decorator
# `or before` means this comment already has an empty line before
and (not self.previous_line.is_comment or before)
and (self.semantic_leading_comment is None or before)
):
self.semantic_leading_comment = block
# `or before` means this decorator already has an empty line before
elif not current_line.is_decorator or before:
self.semantic_leading_comment = None
self.previous_line = current_line
self.previous_block = block
return block
def _line_is_module_docstring(self, current_line: Line) -> bool:
previous_block = self.previous_block
if not previous_block:
return False
if (
len(previous_block.original_line.leaves) != 1
or not previous_block.original_line.is_docstring
or current_line.is_class
or current_line.is_def
):
return False
while previous_block := previous_block.previous_block:
if not previous_block.original_line.is_comment:
return False
return True
def _maybe_empty_lines(self, current_line: Line) -> tuple[int, int]:
max_allowed = 1
if current_line.depth == 0:
max_allowed = 1 if self.mode.is_pyi else 2
if current_line.leaves:
# Consume the first leaf's extra newlines.
first_leaf = current_line.leaves[0]
before = first_leaf.prefix.count("\n")
before = min(before, max_allowed)
first_leaf.prefix = ""
else:
before = 0
user_had_newline = bool(before)
depth = current_line.depth
# Mutate self.previous_defs, remainder of this function should be pure
previous_def = None
while self.previous_defs and self.previous_defs[-1].depth >= depth:
previous_def = self.previous_defs.pop()
if current_line.is_def or current_line.is_class:
self.previous_defs.append(current_line)
if self.previous_line is None:
# Don't insert empty lines before the first line in the file.
return 0, 0
if current_line.is_docstring:
if self.previous_line.is_class:
return 0, 1
if self.previous_line.opens_block and self.previous_line.is_def:
return 0, 0
if previous_def is not None:
assert self.previous_line is not None
if self.mode.is_pyi:
if previous_def.is_class and not previous_def.is_stub_class:
before = 1
elif depth and not current_line.is_def and self.previous_line.is_def:
# Empty lines between attributes and methods should be preserved.
before = 1 if user_had_newline else 0
elif depth:
before = 0
else:
before = 1
else:
if depth:
before = 1
elif (
not depth
and previous_def.depth
and current_line.leaves[-1].type == token.COLON
and (
current_line.leaves[0].value
not in ("with", "try", "for", "while", "if", "match")
)
):
# We shouldn't add two newlines between an indented function and
# a dependent non-indented clause. This is to avoid issues with
# conditional function definitions that are technically top-level
# and therefore get two trailing newlines, but look weird and
# inconsistent when they're followed by elif, else, etc. This is
# worse because these functions only get *one* preceding newline
# already.
before = 1
else:
before = 2
if current_line.is_decorator or current_line.is_def or current_line.is_class:
return self._maybe_empty_lines_for_class_or_def(
current_line, before, user_had_newline
)
if (
self.previous_line.is_import
and self.previous_line.depth == 0
and current_line.depth == 0
and not current_line.is_import
and not current_line.is_fmt_pass_converted(first_leaf_matches=is_import)
):
return 1, 0
if (
self.previous_line.is_import
and not current_line.is_import
and not current_line.is_fmt_pass_converted(first_leaf_matches=is_import)
and depth == self.previous_line.depth
):
return (before or 1), 0
return before, 0
def _maybe_empty_lines_for_class_or_def(
self, current_line: Line, before: int, user_had_newline: bool
) -> tuple[int, int]:
assert self.previous_line is not None
if self.previous_line.is_decorator:
if self.mode.is_pyi and current_line.is_stub_class:
# Insert an empty line after a decorated stub class
return 0, 1
return 0, 0
if self.previous_line.depth < current_line.depth and (
self.previous_line.is_class or self.previous_line.is_def
):
if self.mode.is_pyi:
return 0, 0
return 1 if user_had_newline else 0, 0
comment_to_add_newlines: LinesBlock | None = None
if (
self.previous_line.is_comment
and self.previous_line.depth == current_line.depth
and before == 0
):
slc = self.semantic_leading_comment
if (
slc is not None
and slc.previous_block is not None
and not slc.previous_block.original_line.is_class
and not slc.previous_block.original_line.opens_block
and slc.before <= 1
):
comment_to_add_newlines = slc
else:
return 0, 0
if self.mode.is_pyi:
if current_line.is_class or self.previous_line.is_class:
if self.previous_line.depth < current_line.depth:
newlines = 0
elif self.previous_line.depth > current_line.depth:
newlines = 1
elif current_line.is_stub_class and self.previous_line.is_stub_class:
# No blank line between classes with an empty body
newlines = 0
else:
newlines = 1
# Don't inspect the previous line if it's part of the body of the previous
# statement in the same level, we always want a blank line if there's
# something with a body preceding.
elif self.previous_line.depth > current_line.depth:
newlines = 1
elif (
current_line.is_def or current_line.is_decorator
) and not self.previous_line.is_def:
if current_line.depth:
# In classes empty lines between attributes and methods should
# be preserved.
newlines = min(1, before)
else:
# Blank line between a block of functions (maybe with preceding
# decorators) and a block of non-functions
newlines = 1
else:
newlines = 0
else:
newlines = 1 if current_line.depth else 2
# If a user has left no space after a dummy implementation, don't insert
# new lines. This is useful for instance for @overload or Protocols.
if self.previous_line.is_stub_def and not user_had_newline:
newlines = 0
if comment_to_add_newlines is not None:
previous_block = comment_to_add_newlines.previous_block
if previous_block is not None:
comment_to_add_newlines.before = (
max(comment_to_add_newlines.before, newlines) - previous_block.after
)
newlines = 0
return newlines, 0
def enumerate_reversed(sequence: Sequence[T]) -> Iterator[tuple[Index, T]]:
index = len(sequence) - 1
for element in reversed(sequence):
yield (index, element)
index -= 1
def append_leaves(
new_line: Line, old_line: Line, leaves: list[Leaf], preformatted: bool = False
) -> None:
for old_leaf in leaves:
new_leaf = Leaf(old_leaf.type, old_leaf.value)
replace_child(old_leaf, new_leaf)
new_line.append(new_leaf, preformatted=preformatted)
for comment_leaf in old_line.comments_after(old_leaf):
new_line.append(comment_leaf, preformatted=True)
def is_line_short_enough(line: Line, *, mode: Mode, line_str: str = "") -> bool:
if not line_str:
line_str = line_to_string(line)
if line.contains_standalone_comments():
return False
if "\n" not in line_str:
# No multiline strings (MLS) present
return str_width(line_str) <= mode.line_length
first, *_, last = line_str.split("\n")
if str_width(first) > mode.line_length or str_width(last) > mode.line_length:
return False
# Traverse the AST to examine the context of the multiline string (MLS),
# tracking aspects such as depth and comma existence,
# to determine whether to split the MLS or keep it together.
# Depth (which is based on the existing bracket_depth concept)
# is needed to determine nesting level of the MLS.
# Includes special case for trailing commas.
commas: list[int] = [] # tracks number of commas per depth level
multiline_string: Leaf | None = None
# store the leaves that contain parts of the MLS
multiline_string_contexts: list[LN] = []
max_level_to_update: int | float = math.inf # track the depth of the MLS
for i, leaf in enumerate(line.leaves):
if max_level_to_update == math.inf:
had_comma: int | None = None
if leaf.bracket_depth + 1 > len(commas):
commas.append(0)
elif leaf.bracket_depth + 1 < len(commas):
had_comma = commas.pop()
if (
had_comma is not None
and multiline_string is not None
and multiline_string.bracket_depth == leaf.bracket_depth + 1
):
# Have left the level with the MLS, stop tracking commas
max_level_to_update = leaf.bracket_depth
if had_comma > 0:
# MLS was in parens with at least one comma - force split
return False
if leaf.bracket_depth <= max_level_to_update and leaf.type == token.COMMA:
# Inside brackets, ignore trailing comma
# directly after MLS/MLS-containing expression
ignore_ctxs: list[LN | None] = [None]
ignore_ctxs += multiline_string_contexts
if (line.inside_brackets or leaf.bracket_depth > 0) and (
i != len(line.leaves) - 1 or leaf.prev_sibling not in ignore_ctxs
):
commas[leaf.bracket_depth] += 1
if max_level_to_update != math.inf:
max_level_to_update = min(max_level_to_update, leaf.bracket_depth)
if is_multiline_string(leaf):
if leaf.parent and (
leaf.parent.type == syms.test
or (leaf.parent.parent and leaf.parent.parent.type == syms.dictsetmaker)
):
# Keep ternary and dictionary values parenthesized
return False
if len(multiline_string_contexts) > 0:
# >1 multiline string cannot fit on a single line - force split
return False
multiline_string = leaf
ctx: LN = leaf
# fetch the leaf components of the MLS in the AST
while str(ctx) in line_str:
multiline_string_contexts.append(ctx)
if ctx.parent is None:
break
ctx = ctx.parent
# May not have a triple-quoted multiline string at all,
# in case of a regular string with embedded newlines and line continuations
if len(multiline_string_contexts) == 0:
return True
return all(val == 0 for val in commas)
def can_be_split(line: Line) -> bool:
leaves = line.leaves
if len(leaves) < 2:
return False
if leaves[0].type == token.STRING and leaves[1].type == token.DOT:
call_count = 0
dot_count = 0
next = leaves[-1]
for leaf in leaves[-2::-1]:
if leaf.type in OPENING_BRACKETS:
if next.type not in CLOSING_BRACKETS:
return False
call_count += 1
elif leaf.type == token.DOT:
dot_count += 1
elif leaf.type == token.NAME:
if not (next.type == token.DOT or next.type in OPENING_BRACKETS):
return False
elif leaf.type not in CLOSING_BRACKETS:
return False
if dot_count > 1 and call_count > 1:
return False
return True
def can_omit_invisible_parens(
rhs: RHSResult,
line_length: int,
) -> bool:
line = rhs.body
# We can't omit parens if doing so would result in a type: ignore comment
# sharing a line with other comments, as that breaks type: ignore parsing.
# Check if the opening bracket (last leaf of head) has comments that would merge
# with comments from the first line of the body.
if rhs.head.leaves:
opening_bracket = rhs.head.leaves[-1]
head_comments = rhs.head.comments.get(id(opening_bracket), [])
# If there are comments on the opening bracket line, check if any would
# conflict with type: ignore comments in the body
if head_comments:
has_type_ignore_in_head = any(
is_type_ignore_comment(comment, mode=rhs.head.mode)
for comment in head_comments
)
has_other_comment_in_head = any(
not is_type_ignore_comment(comment, mode=rhs.head.mode)
for comment in head_comments
)
# Check for comments in the body that would potentially end up on the
# same line as the head comments when parens are removed
has_type_ignore_in_body = False
has_other_comment_in_body = False
for leaf in rhs.body.leaves:
for comment in rhs.body.comments.get(id(leaf), []):
if is_type_ignore_comment(comment, mode=rhs.body.mode):
has_type_ignore_in_body = True
else:
has_other_comment_in_body = True
# Preserve parens if we have both type: ignore and other comments that
# could end up on the same line
if (has_type_ignore_in_head and has_other_comment_in_body) or (
has_other_comment_in_head and has_type_ignore_in_body
):
return False
# We need optional parens in order to split standalone comments to their own lines
# if there are no nested parens around the standalone comments
closing_bracket: Leaf | None = None
for leaf in reversed(line.leaves):
if closing_bracket and leaf is closing_bracket.opening_bracket:
closing_bracket = None
if leaf.type == STANDALONE_COMMENT and not closing_bracket:
return False
if (
not closing_bracket
and leaf.type in CLOSING_BRACKETS
and leaf.opening_bracket in line.leaves
and leaf.value
):
closing_bracket = leaf
bt = line.bracket_tracker
if not bt.delimiters:
# Without delimiters the optional parentheses are useless.
return True
max_priority = bt.max_delimiter_priority()
delimiter_count = bt.delimiter_count_with_priority(max_priority)
if delimiter_count > 1:
# With more than one delimiter of a kind the optional parentheses read better.
return False
if delimiter_count == 1:
if max_priority == COMMA_PRIORITY and rhs.head.is_with_or_async_with_stmt:
# For two context manager with statements, the optional parentheses read
# better. In this case, `rhs.body` is the context managers part of
# the with statement. `rhs.head` is the `with (` part on the previous
# line.
return False
# Otherwise it may also read better, but we don't do it today, and it requires
# careful considerations for all possible cases. See
# https://github.com/psf/black/issues/2156.
if max_priority == DOT_PRIORITY:
# A single stranded method call doesn't require optional parentheses.
return True
assert len(line.leaves) >= 2, "Stranded delimiter"
# With a single delimiter, omit if the expression starts or ends with
# a bracket.
first = line.leaves[0]
second = line.leaves[1]
if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS:
if _can_omit_opening_paren(line, first=first, line_length=line_length):
return True
# Note: we are not returning False here because a line might have *both*
# a leading opening bracket and a trailing closing bracket. If the
# opening bracket doesn't match our rule, maybe the closing will.
penultimate = line.leaves[-2]
last = line.leaves[-1]
if (
last.type == token.RPAR
or last.type == token.RBRACE
or (
# don't use indexing for omitting optional parentheses;
# it looks weird
last.type == token.RSQB
and last.parent
and last.parent.type != syms.trailer
)
):
if penultimate.type in OPENING_BRACKETS:
# Empty brackets don't help.
return False
if is_multiline_string(first):
# Additional wrapping of a multiline string in this situation is
# unnecessary.
return True
if _can_omit_closing_paren(line, last=last, line_length=line_length):
return True
return False
def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool:
remainder = False
length = 4 * line.depth
_index = -1
for _index, leaf, leaf_length in line.enumerate_with_length():
if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first:
remainder = True
if remainder:
length += leaf_length
if length > line_length:
break
if leaf.type in OPENING_BRACKETS:
# There are brackets we can further split on.
remainder = False
else:
# checked the entire string and line length wasn't exceeded
if len(line.leaves) == _index + 1:
return True
return False
def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool:
length = 4 * line.depth
seen_other_brackets = False
for _index, leaf, leaf_length in line.enumerate_with_length():
length += leaf_length
if leaf is last.opening_bracket:
if seen_other_brackets or length <= line_length:
return True
elif leaf.type in OPENING_BRACKETS:
# There are brackets we can further split on.
seen_other_brackets = True
return False
def line_to_string(line: Line) -> str:
return str(line).strip("\n") | --- +++ @@ -38,6 +38,7 @@
@dataclass
class Line:
+ """Holds leaves and comments. Can be printed with `str(line)`."""
mode: Mode = field(repr=False)
depth: int = 0
@@ -52,6 +53,15 @@ def append(
self, leaf: Leaf, preformatted: bool = False, track_bracket: bool = False
) -> None:
+ """Add a new `leaf` to the end of the line.
+
+ Unless `preformatted` is True, the `leaf` will receive a new consistent
+ whitespace prefix and metadata applied by :class:`BracketTracker`.
+ Trailing commas are maybe removed, unpacked for loop variables are
+ demoted from being delimiters.
+
+ Inline comments are put aside.
+ """
has_value = (
leaf.type in BRACKETS
# empty fstring and tstring middles must not be truncated
@@ -82,6 +92,11 @@ self.leaves.append(leaf)
def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None:
+ """Like :func:`append()` but disallow invalid standalone comment structure.
+
+ Raises ValueError when any `leaf` is appended after a standalone comment
+ or when a standalone comment is not the first leaf on the line.
+ """
if (
self.bracket_tracker.depth == 0
or self.bracket_tracker.any_open_for_or_lambda()
@@ -98,22 +113,27 @@
@property
def is_comment(self) -> bool:
+ """Is this line a standalone comment?"""
return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT
@property
def is_decorator(self) -> bool:
+ """Is this line a decorator?"""
return bool(self) and self.leaves[0].type == token.AT
@property
def is_import(self) -> bool:
+ """Is this an import line?"""
return bool(self) and is_import(self.leaves[0])
@property
def is_with_or_async_with_stmt(self) -> bool:
+ """Is this a with_stmt line?"""
return bool(self) and is_with_or_async_with_stmt(self.leaves[0])
@property
def is_class(self) -> bool:
+ """Is this line a class definition?"""
return (
bool(self)
and self.leaves[0].type == token.NAME
@@ -122,12 +142,14 @@
@property
def is_stub_class(self) -> bool:
+ """Is this line a class definition with a body consisting only of "..."?"""
return self.is_class and self.leaves[-3:] == [
Leaf(token.DOT, ".") for _ in range(3)
]
@property
def is_def(self) -> bool:
+ """Is this a function definition? (Also returns True for async defs.)"""
try:
first_leaf = self.leaves[0]
except IndexError:
@@ -146,12 +168,17 @@
@property
def is_stub_def(self) -> bool:
+ """Is this line a function definition with a body consisting only of "..."?"""
return self.is_def and self.leaves[-4:] == [Leaf(token.COLON, ":")] + [
Leaf(token.DOT, ".") for _ in range(3)
]
@property
def is_class_paren_empty(self) -> bool:
+ """Is this a class with no base classes but using parentheses?
+
+ Those are unnecessary and should be removed.
+ """
return (
bool(self)
and len(self.leaves) == 4
@@ -164,6 +191,7 @@
@property
def _is_triple_quoted_string(self) -> bool:
+ """Is the line a triple quoted string?"""
if not self or self.leaves[0].type != token.STRING:
return False
value = self.leaves[0].value
@@ -175,14 +203,17 @@
@property
def is_docstring(self) -> bool:
+ """Is the line a docstring?"""
return bool(self) and is_docstring(self.leaves[0])
@property
def is_chained_assignment(self) -> bool:
+ """Is the line a chained assignment"""
return [leaf.type for leaf in self.leaves].count(token.EQUAL) > 1
@property
def opens_block(self) -> bool:
+ """Does this line open a new level of indentation."""
if len(self.leaves) == 0:
return False
return self.leaves[-1].type == token.COLON
@@ -190,6 +221,11 @@ def is_fmt_pass_converted(
self, *, first_leaf_matches: Callable[[Leaf], bool] | None = None
) -> bool:
+ """Is this line converted from fmt off/skip code?
+
+ If first_leaf_matches is not None, it only returns True if the first
+ leaf of converted code matches.
+ """
if len(self.leaves) != 1:
return False
leaf = self.leaves[0]
@@ -203,6 +239,7 @@ )
def contains_standalone_comments(self) -> bool:
+ """If so, needs to be split before emitting."""
for leaf in self.leaves:
if leaf.type == STANDALONE_COMMENT:
return True
@@ -210,6 +247,7 @@ return False
def contains_implicit_multiline_string_with_comments(self) -> bool:
+ """Chck if we have an implicit multiline string with comments on the line"""
for leaf_type, leaf_group_iterator in itertools.groupby(
self.leaves, lambda leaf: leaf.type
):
@@ -296,6 +334,11 @@ return any(is_multiline_string(leaf) for leaf in self.leaves)
def has_magic_trailing_comma(self, closing: Leaf) -> bool:
+ """Return True if we have a magic trailing comma, that is when:
+ - there's a trailing comma here
+ - it's not from single-element square bracket indexing
+ - it's not a one-tuple
+ """
if not (
closing.type in CLOSING_BRACKETS
and self.leaves
@@ -335,6 +378,7 @@ return False
def append_comment(self, comment: Leaf) -> bool:
+ """Add an inline or standalone comment to the line."""
if (
comment.type == STANDALONE_COMMENT
and self.bracket_tracker.any_open_brackets()
@@ -371,9 +415,11 @@ return True
def comments_after(self, leaf: Leaf) -> list[Leaf]:
+ """Generate comments that should appear directly after `leaf`."""
return self.comments.get(id(leaf), [])
def remove_trailing_comma(self) -> None:
+ """Remove the trailing comma and moves the comments attached to it."""
trailing_comma = self.leaves.pop()
trailing_comma_comments = self.comments.pop(id(trailing_comma), [])
self.comments.setdefault(id(self.leaves[-1]), []).extend(
@@ -381,6 +427,7 @@ )
def is_complex_subscript(self, leaf: Leaf) -> bool:
+ """Return True iff `leaf` is part of a slice with non-trivial exprs."""
open_lsqb = self.bracket_tracker.get_open_lsqb()
if open_lsqb is None:
return False
@@ -401,6 +448,10 @@ def enumerate_with_length(
self, is_reversed: bool = False
) -> Iterator[tuple[Index, Leaf, int]]:
+ """Return an enumeration of leaves with their length.
+
+ Stops prematurely on multiline strings and standalone comments.
+ """
op = cast(
Callable[[Sequence[Leaf]], Iterator[tuple[Index, Leaf]]],
enumerate_reversed if is_reversed else enumerate,
@@ -425,6 +476,7 @@ )
def __str__(self) -> str:
+ """Render the line."""
if not self:
return "\n"
@@ -440,11 +492,13 @@ return res + "\n"
def __bool__(self) -> bool:
+ """Return True if the line has leaves or comments."""
return bool(self.leaves or self.comments)
@dataclass
class RHSResult:
+ """Intermediate split result from a right hand split."""
head: Line
body: Line
@@ -455,6 +509,11 @@
@dataclass
class LinesBlock:
+ """Class that holds information about a block of formatted lines.
+
+ This is introduced so that the EmptyLineTracker can look behind the standalone
+ comments and adjust their empty lines for class or def lines.
+ """
mode: Mode
previous_block: Optional["LinesBlock"]
@@ -472,6 +531,13 @@
@dataclass
class EmptyLineTracker:
+ """Provides a stateful method that returns the number of potential extra
+ empty lines needed before and after the currently processed line.
+
+ Note: this tracker works on lines that haven't been split yet. It assumes
+ the prefix of the first leaf consists of optional newlines. Those newlines
+ are consumed by `maybe_empty_lines()` and included in the computation.
+ """
mode: Mode
previous_line: Line | None = None
@@ -480,6 +546,11 @@ semantic_leading_comment: LinesBlock | None = None
def maybe_empty_lines(self, current_line: Line) -> LinesBlock:
+ """Return the number of extra empty lines before and after the `current_line`.
+
+ This is for separating `def`, `async def` and `class` with extra empty
+ lines (two on module-level).
+ """
form_feed = (
current_line.depth == 0
and bool(current_line.leaves)
@@ -710,6 +781,7 @@
def enumerate_reversed(sequence: Sequence[T]) -> Iterator[tuple[Index, T]]:
+ """Like `reversed(enumerate(sequence))` if that were possible."""
index = len(sequence) - 1
for element in reversed(sequence):
yield (index, element)
@@ -719,6 +791,18 @@ def append_leaves(
new_line: Line, old_line: Line, leaves: list[Leaf], preformatted: bool = False
) -> None:
+ """
+ Append leaves (taken from @old_line) to @new_line, making sure to fix the
+ underlying Node structure where appropriate.
+
+ All of the leaves in @leaves are duplicated. The duplicates are then
+ appended to @new_line and used to replace their originals in the underlying
+ Node structure. Any comments attached to the old leaves are reattached to
+ the new leaves.
+
+ Pre-conditions:
+ set(@leaves) is a subset of set(@old_line.leaves).
+ """
for old_leaf in leaves:
new_leaf = Leaf(old_leaf.type, old_leaf.value)
replace_child(old_leaf, new_leaf)
@@ -729,6 +813,11 @@
def is_line_short_enough(line: Line, *, mode: Mode, line_str: str = "") -> bool:
+ """For non-multiline strings, return True if `line` is no longer than `line_length`.
+ For multiline strings, looks at the context around `line` to determine
+ if it should be inlined or split up.
+ Uses the provided `line_str` rendering, if any, otherwise computes a new one.
+ """
if not line_str:
line_str = line_to_string(line)
@@ -812,6 +901,12 @@
def can_be_split(line: Line) -> bool:
+ """Return False if the line cannot be split *for sure*.
+
+ This is not an exhaustive search but a cheap heuristic that we can use to
+ avoid some unfortunate formattings (mostly around wrapping unsplittable code
+ in unnecessary parentheses).
+ """
leaves = line.leaves
if len(leaves) < 2:
return False
@@ -845,6 +940,12 @@ rhs: RHSResult,
line_length: int,
) -> bool:
+ """Does `rhs.body` have a shape safe to reformat without optional parens around it?
+
+ Returns True for only a subset of potentially nice looking formattings but
+ the point is to not return false positives that end up producing lines that
+ are too long.
+ """
line = rhs.body
# We can't omit parens if doing so would result in a type: ignore comment
@@ -971,6 +1072,7 @@
def _can_omit_opening_paren(line: Line, *, first: Leaf, line_length: int) -> bool:
+ """See `can_omit_invisible_parens`."""
remainder = False
length = 4 * line.depth
_index = -1
@@ -995,6 +1097,7 @@
def _can_omit_closing_paren(line: Line, *, last: Leaf, line_length: int) -> bool:
+ """See `can_omit_invisible_parens`."""
length = 4 * line.depth
seen_other_brackets = False
for _index, leaf, leaf_length in line.enumerate_with_length():
@@ -1011,4 +1114,8 @@
def line_to_string(line: Line) -> str:
- return str(line).strip("\n")+ """Returns the string representation of @line.
+
+ WARNING: This is known to be computationally expensive.
+ """
+ return str(line).strip("\n")
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/lines.py |
Add detailed docstrings explaining each function | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
__author__ = "Guido van Rossum <guido@python.org>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import io
import logging
import os
import pkgutil
import sys
from collections.abc import Iterable, Iterator
from contextlib import contextmanager
from dataclasses import dataclass, field
from logging import Logger
from typing import Any, Union, cast
from blib2to3.pgen2.grammar import Grammar
from blib2to3.pgen2.tokenize import TokenInfo
from blib2to3.pytree import NL
# Pgen imports
from . import grammar, parse, pgen, token, tokenize
Path = Union[str, "os.PathLike[str]"]
@dataclass
class ReleaseRange:
start: int
end: int | None = None
tokens: list[Any] = field(default_factory=list)
def lock(self) -> None:
total_eaten = len(self.tokens)
self.end = self.start + total_eaten
class TokenProxy:
def __init__(self, generator: Any) -> None:
self._tokens = generator
self._counter = 0
self._release_ranges: list[ReleaseRange] = []
@contextmanager
def release(self) -> Iterator["TokenProxy"]:
release_range = ReleaseRange(self._counter)
self._release_ranges.append(release_range)
try:
yield self
finally:
# Lock the last release range to the final position that
# has been eaten.
release_range.lock()
def eat(self, point: int) -> Any:
eaten_tokens = self._release_ranges[-1].tokens
if point < len(eaten_tokens):
return eaten_tokens[point]
else:
while point >= len(eaten_tokens):
token = next(self._tokens)
eaten_tokens.append(token)
return token
def __iter__(self) -> "TokenProxy":
return self
def __next__(self) -> Any:
# If the current position is already compromised (looked up)
# return the eaten token, if not just go further on the given
# token producer.
for release_range in self._release_ranges:
assert release_range.end is not None
start, end = release_range.start, release_range.end
if start <= self._counter < end:
token = release_range.tokens[self._counter - start]
break
else:
token = next(self._tokens)
self._counter += 1
return token
def can_advance(self, to: int) -> bool:
# Try to eat, fail if it can't. The eat operation is cached
# so there won't be any additional cost of eating here
try:
self.eat(to)
except StopIteration:
return False
else:
return True
class Driver:
def __init__(self, grammar: Grammar, logger: Logger | None = None) -> None:
self.grammar = grammar
if logger is None:
logger = logging.getLogger(__name__)
self.logger = logger
def parse_tokens(self, tokens: Iterable[TokenInfo], debug: bool = False) -> NL:
# XXX Move the prefix computation into a wrapper around tokenize.
proxy = TokenProxy(tokens)
p = parse.Parser(self.grammar)
p.setup(proxy=proxy)
lineno = 1
column = 0
indent_columns: list[int] = []
type = value = start = end = line_text = None
prefix = ""
for quintuple in proxy:
type, value, start, end, line_text = quintuple
if start != (lineno, column):
assert (lineno, column) <= start, ((lineno, column), start)
s_lineno, s_column = start
if lineno < s_lineno:
prefix += "\n" * (s_lineno - lineno)
lineno = s_lineno
column = 0
if column < s_column:
prefix += line_text[column:s_column]
column = s_column
if type in (tokenize.COMMENT, tokenize.NL):
prefix += value
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
continue
if type == token.OP:
type = grammar.opmap[value]
if debug:
assert type is not None
self.logger.debug(
"%s %r (prefix=%r)", token.tok_name[type], value, prefix
)
if type == token.INDENT:
indent_columns.append(len(value))
_prefix = prefix + value
prefix = ""
value = ""
elif type == token.DEDENT:
_indent_col = indent_columns.pop()
prefix, _prefix = self._partially_consume_prefix(prefix, _indent_col)
if p.addtoken(cast(int, type), value, (prefix, start)):
if debug:
self.logger.debug("Stop.")
break
prefix = ""
if type in {token.INDENT, token.DEDENT}:
prefix = _prefix
lineno, column = end
# FSTRING_MIDDLE and TSTRING_MIDDLE are the only tokens that can end with a
# newline, and `end` will point to the next line. For that case, don't
# increment lineno.
if value.endswith("\n") and type not in (
token.FSTRING_MIDDLE,
token.TSTRING_MIDDLE,
):
lineno += 1
column = 0
else:
# We never broke out -- EOF is too soon (how can this happen???)
assert start is not None
raise parse.ParseError("incomplete input", type, value, (prefix, start))
assert p.rootnode is not None
return p.rootnode
def parse_file(
self, filename: Path, encoding: str | None = None, debug: bool = False
) -> NL:
with open(filename, encoding=encoding) as stream:
text = stream.read()
return self.parse_string(text, debug)
def parse_string(self, text: str, debug: bool = False) -> NL:
tokens = tokenize.tokenize(text, grammar=self.grammar)
return self.parse_tokens(tokens, debug)
def _partially_consume_prefix(self, prefix: str, column: int) -> tuple[str, str]:
lines: list[str] = []
current_line = ""
current_column = 0
wait_for_nl = False
for char in prefix:
current_line += char
if wait_for_nl:
if char == "\n":
if current_line.strip() and current_column < column:
res = "".join(lines)
return res, prefix[len(res) :]
lines.append(current_line)
current_line = ""
current_column = 0
wait_for_nl = False
elif char in " \t":
current_column += 1
elif char == "\n":
# unexpected empty line
current_column = 0
elif char == "\f":
current_column = 0
else:
# indent is finished
wait_for_nl = True
return "".join(lines), current_line
def _generate_pickle_name(gt: Path, cache_dir: Path | None = None) -> str:
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
name = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
if cache_dir:
return os.path.join(cache_dir, os.path.basename(name))
else:
return name
def load_grammar(
gt: str = "Grammar.txt",
gp: str | None = None,
save: bool = True,
force: bool = False,
logger: Logger | None = None,
) -> Grammar:
if logger is None:
logger = logging.getLogger(__name__)
gp = _generate_pickle_name(gt) if gp is None else gp
if force or not _newer(gp, gt):
g: grammar.Grammar = pgen.generate_grammar(gt)
if save:
try:
g.dump(gp)
except OSError:
# Ignore error, caching is not vital.
pass
else:
g = grammar.Grammar()
g.load(gp)
return g
def _newer(a: str, b: str) -> bool:
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
def load_packaged_grammar(
package: str, grammar_source: str, cache_dir: Path | None = None
) -> grammar.Grammar:
if os.path.isfile(grammar_source):
gp = _generate_pickle_name(grammar_source, cache_dir) if cache_dir else None
return load_grammar(grammar_source, gp=gp)
pickled_name = _generate_pickle_name(os.path.basename(grammar_source), cache_dir)
data = pkgutil.get_data(package, pickled_name)
assert data is not None
g = grammar.Grammar()
g.loads(data)
return g
def main(*args: str) -> bool:
if not args:
args = tuple(sys.argv[1:])
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format="%(message)s")
for gt in args:
load_grammar(gt, save=True, force=True)
return True
if __name__ == "__main__":
sys.exit(int(not main())) | --- +++ @@ -5,6 +5,11 @@ # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
+"""Parser driver.
+
+This provides a high-level interface to parse a file into a syntax tree.
+
+"""
__author__ = "Guido van Rossum <guido@python.org>"
@@ -108,6 +113,7 @@ self.logger = logger
def parse_tokens(self, tokens: Iterable[TokenInfo], debug: bool = False) -> NL:
+ """Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
proxy = TokenProxy(tokens)
@@ -181,11 +187,13 @@ def parse_file(
self, filename: Path, encoding: str | None = None, debug: bool = False
) -> NL:
+ """Parse a file and return the syntax tree."""
with open(filename, encoding=encoding) as stream:
text = stream.read()
return self.parse_string(text, debug)
def parse_string(self, text: str, debug: bool = False) -> NL:
+ """Parse a string and return the syntax tree."""
tokens = tokenize.tokenize(text, grammar=self.grammar)
return self.parse_tokens(tokens, debug)
@@ -237,6 +245,7 @@ force: bool = False,
logger: Logger | None = None,
) -> Grammar:
+ """Load the grammar (maybe from a pickle)."""
if logger is None:
logger = logging.getLogger(__name__)
gp = _generate_pickle_name(gt) if gp is None else gp
@@ -255,6 +264,7 @@
def _newer(a: str, b: str) -> bool:
+ """Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
@@ -265,6 +275,16 @@ def load_packaged_grammar(
package: str, grammar_source: str, cache_dir: Path | None = None
) -> grammar.Grammar:
+ """Normally, loads a pickled grammar by doing
+ pkgutil.get_data(package, pickled_grammar)
+ where *pickled_grammar* is computed from *grammar_source* by adding the
+ Python version and using a ``.pickle`` extension.
+
+ However, if *grammar_source* is an extant file, load_grammar(grammar_source)
+ is called instead. This facilitates using a packaged grammar file when needed
+ but preserves load_grammar's automatic regeneration behavior when possible.
+
+ """
if os.path.isfile(grammar_source):
gp = _generate_pickle_name(grammar_source, cache_dir) if cache_dir else None
return load_grammar(grammar_source, gp=gp)
@@ -277,6 +297,10 @@
def main(*args: str) -> bool:
+ """Main program, when run as a script: produce grammar pickle files.
+
+ Calls load_grammar for each argument, a path to a grammar text file.
+ """
if not args:
args = tuple(sys.argv[1:])
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format="%(message)s")
@@ -286,4 +310,4 @@
if __name__ == "__main__":
- sys.exit(int(not main()))
+ sys.exit(int(not main()))
| https://raw.githubusercontent.com/psf/black/HEAD/src/blib2to3/pgen2/driver.py |
Add docstrings to make code maintainable |
import ast
import collections
import dataclasses
import re
import secrets
import string
from collections.abc import Collection
from functools import lru_cache
from importlib.util import find_spec
from typing import TypeGuard
from black.mode import Mode
from black.output import out
from black.report import NothingChanged
TRANSFORMED_MAGICS = frozenset((
"get_ipython().run_cell_magic",
"get_ipython().system",
"get_ipython().getoutput",
"get_ipython().run_line_magic",
))
TOKENS_TO_IGNORE = frozenset((
"ENDMARKER",
"NL",
"NEWLINE",
"COMMENT",
"DEDENT",
"UNIMPORTANT_WS",
"ESCAPED_NL",
))
PYTHON_CELL_MAGICS = frozenset((
"capture",
"prun",
"pypy",
"python",
"python3",
"time",
"timeit",
))
@dataclasses.dataclass(frozen=True)
class Replacement:
mask: str
src: str
@lru_cache
def jupyter_dependencies_are_installed(*, warn: bool) -> bool:
installed = (
find_spec("tokenize_rt") is not None and find_spec("IPython") is not None
)
if not installed and warn:
msg = (
"Skipping .ipynb files as Jupyter dependencies are not installed.\n"
'You can fix this by running ``pip install "black[jupyter]"``'
)
out(msg)
return installed
def validate_cell(src: str, mode: Mode) -> None:
if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS):
raise NothingChanged
line = _get_code_start(src)
if line.startswith("%%") and (
line.split(maxsplit=1)[0][2:]
not in PYTHON_CELL_MAGICS | mode.python_cell_magics
):
raise NothingChanged
def remove_trailing_semicolon(src: str) -> tuple[str, bool]:
from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src
tokens = src_to_tokens(src)
trailing_semicolon = False
for idx, token in reversed_enumerate(tokens):
if token.name in TOKENS_TO_IGNORE:
continue
if token.name == "OP" and token.src == ";":
del tokens[idx]
trailing_semicolon = True
break
if not trailing_semicolon:
return src, False
return tokens_to_src(tokens), True
def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str:
if not has_trailing_semicolon:
return src
from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src
tokens = src_to_tokens(src)
for idx, token in reversed_enumerate(tokens):
if token.name in TOKENS_TO_IGNORE:
continue
tokens[idx] = token._replace(src=token.src + ";")
break
else: # pragma: nocover
raise AssertionError(
"INTERNAL ERROR: Was not able to reinstate trailing semicolon. "
"Please report a bug on https://github.com/psf/black/issues. "
) from None
return str(tokens_to_src(tokens))
def mask_cell(src: str) -> tuple[str, list[Replacement]]:
replacements: list[Replacement] = []
try:
ast.parse(src)
except SyntaxError:
# Might have IPython magics, will process below.
pass
else:
# Syntax is fine, nothing to mask, early return.
return src, replacements
from IPython.core.inputtransformer2 import TransformerManager
transformer_manager = TransformerManager()
# A side effect of the following transformation is that it also removes any
# empty lines at the beginning of the cell.
transformed = transformer_manager.transform_cell(src)
transformed, cell_magic_replacements = replace_cell_magics(transformed)
replacements += cell_magic_replacements
transformed = transformer_manager.transform_cell(transformed)
transformed, magic_replacements = replace_magics(transformed)
if len(transformed.strip().splitlines()) != len(src.strip().splitlines()):
# Multi-line magic, not supported.
raise NothingChanged
replacements += magic_replacements
return transformed, replacements
def create_token(n_chars: int) -> str:
assert n_chars > 0
if n_chars == 1:
return secrets.choice(string.ascii_letters)
if n_chars < 4:
return "_" + "".join(
secrets.choice(string.ascii_letters + string.digits + "_")
for _ in range(n_chars - 1)
)
n_bytes = max(n_chars // 2 - 1, 1)
token = secrets.token_hex(n_bytes)
if len(token) + 3 > n_chars:
token = token[:-1]
# We use a bytestring so that the string does not get interpreted
# as a docstring.
return f'b"{token}"'
def get_token(src: str, magic: str, existing_tokens: Collection[str] = ()) -> str:
assert magic
n_chars = len(magic)
token = create_token(n_chars)
counter = 0
while token in src or token in existing_tokens:
token = create_token(n_chars)
counter += 1
if counter > 100:
raise AssertionError(
"INTERNAL ERROR: Black was not able to replace IPython magic. "
"Please report a bug on https://github.com/psf/black/issues. "
f"The magic might be helpful: {magic}"
) from None
return token
def replace_cell_magics(src: str) -> tuple[str, list[Replacement]]:
replacements: list[Replacement] = []
tree = ast.parse(src)
cell_magic_finder = CellMagicFinder()
cell_magic_finder.visit(tree)
if cell_magic_finder.cell_magic is None:
return src, replacements
header = cell_magic_finder.cell_magic.header
mask = get_token(src, header)
replacements.append(Replacement(mask=mask, src=header))
return f"{mask}\n{cell_magic_finder.cell_magic.body}", replacements
def replace_magics(src: str) -> tuple[str, list[Replacement]]:
replacements = []
existing_tokens: set[str] = set()
magic_finder = MagicFinder()
magic_finder.visit(ast.parse(src))
new_srcs = []
for i, line in enumerate(src.split("\n"), start=1):
if i in magic_finder.magics:
offsets_and_magics = magic_finder.magics[i]
if len(offsets_and_magics) != 1: # pragma: nocover
raise AssertionError(
f"Expecting one magic per line, got: {offsets_and_magics}\n"
"Please report a bug on https://github.com/psf/black/issues."
)
col_offset, magic = (
offsets_and_magics[0].col_offset,
offsets_and_magics[0].magic,
)
mask = get_token(src, magic, existing_tokens)
replacements.append(Replacement(mask=mask, src=magic))
existing_tokens.add(mask)
line = line[:col_offset] + mask
new_srcs.append(line)
return "\n".join(new_srcs), replacements
def unmask_cell(src: str, replacements: list[Replacement]) -> str:
for replacement in replacements:
if src.count(replacement.mask) != 1:
raise NothingChanged
src = src.replace(replacement.mask, replacement.src, 1)
return src
def _get_code_start(src: str) -> str:
for match in re.finditer(".+", src):
line = match.group(0).lstrip()
if line and not line.startswith("#"):
return line
return ""
def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
return (
isinstance(node, ast.Attribute)
and isinstance(node.value, ast.Call)
and isinstance(node.value.func, ast.Name)
and node.value.func.id == "get_ipython"
)
def _get_str_args(args: list[ast.expr]) -> list[str]:
str_args = []
for arg in args:
assert isinstance(arg, ast.Constant) and isinstance(arg.value, str)
str_args.append(arg.value)
return str_args
@dataclasses.dataclass(frozen=True)
class CellMagic:
name: str
params: str | None
body: str
@property
def header(self) -> str:
if self.params:
return f"%%{self.name} {self.params}"
return f"%%{self.name}"
# ast.NodeVisitor + dataclass = breakage under mypyc.
class CellMagicFinder(ast.NodeVisitor):
def __init__(self, cell_magic: CellMagic | None = None) -> None:
self.cell_magic = cell_magic
def visit_Expr(self, node: ast.Expr) -> None:
if (
isinstance(node.value, ast.Call)
and _is_ipython_magic(node.value.func)
and node.value.func.attr == "run_cell_magic"
):
args = _get_str_args(node.value.args)
self.cell_magic = CellMagic(name=args[0], params=args[1], body=args[2])
self.generic_visit(node)
@dataclasses.dataclass(frozen=True)
class OffsetAndMagic:
col_offset: int
magic: str
# Unsurprisingly, subclassing ast.NodeVisitor means we can't use dataclasses here
# as mypyc will generate broken code.
class MagicFinder(ast.NodeVisitor):
def __init__(self) -> None:
self.magics: dict[int, list[OffsetAndMagic]] = collections.defaultdict(list)
def visit_Assign(self, node: ast.Assign) -> None:
if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func):
args = _get_str_args(node.value.args)
if node.value.func.attr == "getoutput":
src = f"!{args[0]}"
elif node.value.func.attr == "run_line_magic":
src = f"%{args[0]}"
if args[1]:
src += f" {args[1]}"
else:
raise AssertionError(
f"Unexpected IPython magic {node.value.func.attr!r} found. "
"Please report a bug on https://github.com/psf/black/issues."
) from None
self.magics[node.value.lineno].append(
OffsetAndMagic(node.value.col_offset, src)
)
self.generic_visit(node)
def visit_Expr(self, node: ast.Expr) -> None:
if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func):
args = _get_str_args(node.value.args)
if node.value.func.attr == "run_line_magic":
if args[0] == "pinfo":
src = f"?{args[1]}"
elif args[0] == "pinfo2":
src = f"??{args[1]}"
else:
src = f"%{args[0]}"
if args[1]:
src += f" {args[1]}"
elif node.value.func.attr == "system":
src = f"!{args[0]}"
elif node.value.func.attr == "getoutput":
src = f"!!{args[0]}"
else:
raise NothingChanged # unsupported magic.
self.magics[node.value.lineno].append(
OffsetAndMagic(node.value.col_offset, src)
)
self.generic_visit(node) | --- +++ @@ -1,3 +1,4 @@+"""Functions to process IPython magics with."""
import ast
import collections
@@ -61,6 +62,22 @@
def validate_cell(src: str, mode: Mode) -> None:
+ r"""Check that cell does not already contain TransformerManager transformations,
+ or non-Python cell magics, which might cause tokenize_rt to break because of
+ indentations.
+
+ If a cell contains ``!ls``, then it'll be transformed to
+ ``get_ipython().system('ls')``. However, if the cell originally contained
+ ``get_ipython().system('ls')``, then it would get transformed in the same way:
+
+ >>> TransformerManager().transform_cell("get_ipython().system('ls')")
+ "get_ipython().system('ls')\n"
+ >>> TransformerManager().transform_cell("!ls")
+ "get_ipython().system('ls')\n"
+
+ Due to the impossibility of safely roundtripping in such situations, cells
+ containing transformed magics will be ignored.
+ """
if any(transformed_magic in src for transformed_magic in TRANSFORMED_MAGICS):
raise NothingChanged
@@ -73,6 +90,21 @@
def remove_trailing_semicolon(src: str) -> tuple[str, bool]:
+ """Remove trailing semicolon from Jupyter notebook cell.
+
+ For example,
+
+ fig, ax = plt.subplots()
+ ax.plot(x_data, y_data); # plot data
+
+ would become
+
+ fig, ax = plt.subplots()
+ ax.plot(x_data, y_data) # plot data
+
+ Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses
+ ``tokenize_rt`` so that round-tripping works fine.
+ """
from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src
tokens = src_to_tokens(src)
@@ -90,6 +122,11 @@
def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str:
+ """Put trailing semicolon back if cell originally had it.
+
+ Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses
+ ``tokenize_rt`` so that round-tripping works fine.
+ """
if not has_trailing_semicolon:
return src
from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src
@@ -109,6 +146,20 @@
def mask_cell(src: str) -> tuple[str, list[Replacement]]:
+ """Mask IPython magics so content becomes parseable Python code.
+
+ For example,
+
+ %matplotlib inline
+ 'foo'
+
+ becomes
+
+ b"25716f358c32750"
+ 'foo'
+
+ The replacements are returned, along with the transformed code.
+ """
replacements: list[Replacement] = []
try:
ast.parse(src)
@@ -137,6 +188,7 @@
def create_token(n_chars: int) -> str:
+ """Create a randomly generated token that is n_chars characters long."""
assert n_chars > 0
if n_chars == 1:
return secrets.choice(string.ascii_letters)
@@ -155,6 +207,13 @@
def get_token(src: str, magic: str, existing_tokens: Collection[str] = ()) -> str:
+ """Return randomly generated token to mask IPython magic with.
+
+ For example, if 'magic' was `%matplotlib inline`, then a possible
+ token to mask it with would be `"43fdd17f7e5ddc83"`. The token
+ will be the same length as the magic, and we make sure that it was
+ not already present anywhere else in the cell.
+ """
assert magic
n_chars = len(magic)
token = create_token(n_chars)
@@ -172,6 +231,22 @@
def replace_cell_magics(src: str) -> tuple[str, list[Replacement]]:
+ r"""Replace cell magic with token.
+
+ Note that 'src' will already have been processed by IPython's
+ TransformerManager().transform_cell.
+
+ Example,
+
+ get_ipython().run_cell_magic('t', '-n1', 'ls =!ls\n')
+
+ becomes
+
+ "a794."
+ ls =!ls
+
+ The replacement, along with the transformed code, is returned.
+ """
replacements: list[Replacement] = []
tree = ast.parse(src)
@@ -187,6 +262,23 @@
def replace_magics(src: str) -> tuple[str, list[Replacement]]:
+ """Replace magics within body of cell.
+
+ Note that 'src' will already have been processed by IPython's
+ TransformerManager().transform_cell.
+
+ Example, this
+
+ get_ipython().run_line_magic('matplotlib', 'inline')
+ 'foo'
+
+ becomes
+
+ "5e67db56d490fd39"
+ 'foo'
+
+ The replacement, along with the transformed code, are returned.
+ """
replacements = []
existing_tokens: set[str] = set()
magic_finder = MagicFinder()
@@ -213,6 +305,18 @@
def unmask_cell(src: str, replacements: list[Replacement]) -> str:
+ """Remove replacements from cell.
+
+ For example
+
+ "9b20"
+ foo = bar
+
+ becomes
+
+ %%time
+ foo = bar
+ """
for replacement in replacements:
if src.count(replacement.mask) != 1:
raise NothingChanged
@@ -221,6 +325,13 @@
def _get_code_start(src: str) -> str:
+ """Provides the first line where the code starts.
+
+ Iterates over lines of code until it finds the first line that doesn't
+ contain only empty spaces and comments. It removes any empty spaces at the
+ start of the line and returns it. If such line doesn't exist, it returns an
+ empty string.
+ """
for match in re.finditer(".+", src):
line = match.group(0).lstrip()
if line and not line.startswith("#"):
@@ -229,6 +340,12 @@
def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
+ """Check if attribute is IPython magic.
+
+ Note that the source of the abstract syntax tree
+ will already have been processed by IPython's
+ TransformerManager().transform_cell.
+ """
return (
isinstance(node, ast.Attribute)
and isinstance(node.value, ast.Call)
@@ -260,11 +377,29 @@
# ast.NodeVisitor + dataclass = breakage under mypyc.
class CellMagicFinder(ast.NodeVisitor):
+ r"""Find cell magics.
+
+ Note that the source of the abstract syntax tree
+ will already have been processed by IPython's
+ TransformerManager().transform_cell.
+
+ For example,
+
+ %%time\n
+ foo()
+
+ would have been transformed to
+
+ get_ipython().run_cell_magic('time', '', 'foo()\n')
+
+ and we look for instances of the latter.
+ """
def __init__(self, cell_magic: CellMagic | None = None) -> None:
self.cell_magic = cell_magic
def visit_Expr(self, node: ast.Expr) -> None:
+ """Find cell magic, extract header and body."""
if (
isinstance(node.value, ast.Call)
and _is_ipython_magic(node.value.func)
@@ -284,11 +419,42 @@ # Unsurprisingly, subclassing ast.NodeVisitor means we can't use dataclasses here
# as mypyc will generate broken code.
class MagicFinder(ast.NodeVisitor):
+ """Visit cell to look for get_ipython calls.
+
+ Note that the source of the abstract syntax tree
+ will already have been processed by IPython's
+ TransformerManager().transform_cell.
+
+ For example,
+
+ %matplotlib inline
+
+ would have been transformed to
+
+ get_ipython().run_line_magic('matplotlib', 'inline')
+
+ and we look for instances of the latter (and likewise for other
+ types of magics).
+ """
def __init__(self) -> None:
self.magics: dict[int, list[OffsetAndMagic]] = collections.defaultdict(list)
def visit_Assign(self, node: ast.Assign) -> None:
+ """Look for system assign magics.
+
+ For example,
+
+ black_version = !black --version
+ env = %env var
+
+ would have been (respectively) transformed to
+
+ black_version = get_ipython().getoutput('black --version')
+ env = get_ipython().run_line_magic('env', 'var')
+
+ and we look for instances of any of the latter.
+ """
if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func):
args = _get_str_args(node.value.args)
if node.value.func.attr == "getoutput":
@@ -308,6 +474,24 @@ self.generic_visit(node)
def visit_Expr(self, node: ast.Expr) -> None:
+ """Look for magics in body of cell.
+
+ For examples,
+
+ !ls
+ !!ls
+ ?ls
+ ??ls
+
+ would (respectively) get transformed to
+
+ get_ipython().system('ls')
+ get_ipython().getoutput('ls')
+ get_ipython().run_line_magic('pinfo', 'ls')
+ get_ipython().run_line_magic('pinfo2', 'ls')
+
+ and we look for instances of any of the latter.
+ """
if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func):
args = _get_str_args(node.value.args)
if node.value.func.attr == "run_line_magic":
@@ -328,4 +512,4 @@ self.magics[node.value.lineno].append(
OffsetAndMagic(node.value.col_offset, src)
)
- self.generic_visit(node)
+ self.generic_visit(node)
| https://raw.githubusercontent.com/psf/black/HEAD/src/black/handle_ipynb_magics.py |
Add docstrings to improve collaboration | # Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers.deepspeed import is_deepspeed_zero3_enabled
from trainer import Trainer
from transformers.trainer_utils import PredictionOutput
from transformers.utils import logging
logger = logging.get_logger(__name__)
class Seq2SeqTrainer(Trainer):
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
**gen_kwargs
) -> Dict[str, float]:
gen_kwargs = gen_kwargs.copy()
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
gen_kwargs["max_length"] = self.args.generation_max_length
gen_kwargs["num_beams"] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
)
self._gen_kwargs = gen_kwargs
return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
def predict(
self,
test_dataset: Dataset,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "test",
**gen_kwargs
) -> PredictionOutput:
gen_kwargs = gen_kwargs.copy()
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
gen_kwargs["max_length"] = self.args.generation_max_length
gen_kwargs["num_beams"] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
)
self._gen_kwargs = gen_kwargs
return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
if not self.args.predict_with_generate or prediction_loss_only:
return super().prediction_step(
model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
)
has_labels = "labels" in inputs
inputs = self._prepare_inputs(inputs)
# XXX: adapt synced_gpus for fairscale as well
gen_kwargs = self._gen_kwargs.copy()
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
gen_kwargs["max_length"] = self.model.config.max_length
gen_kwargs["num_beams"] = (
gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.model.config.num_beams
)
default_synced_gpus = True if is_deepspeed_zero3_enabled() else False
gen_kwargs["synced_gpus"] = (
gen_kwargs["synced_gpus"] if gen_kwargs.get("synced_gpus") is not None else default_synced_gpus
)
if "attention_mask" in inputs:
gen_kwargs["attention_mask"] = inputs.get("attention_mask", None)
if "position_ids" in inputs:
gen_kwargs["position_ids"] = inputs.get("position_ids", None)
if "global_attention_mask" in inputs:
gen_kwargs["global_attention_mask"] = inputs.get("global_attention_mask", None)
# prepare generation inputs
# some encoder-decoder models can have varying encoders and thus
# varying model input names
if hasattr(self.model, "encoder") and self.model.encoder.main_input_name != self.model.main_input_name:
generation_inputs = inputs[self.model.encoder.main_input_name]
else:
generation_inputs = inputs[self.model.main_input_name]
gen_kwargs["input_ids"] = generation_inputs
generated_tokens = self.model.generate(**gen_kwargs)
generated_tokens = generated_tokens[:, generation_inputs.size()[-1]:]
# in case the batch is shorter than max length, the output should be padded
if gen_kwargs.get("max_length") is not None and generated_tokens.shape[-1] < gen_kwargs["max_length"]:
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
elif gen_kwargs.get("max_new_tokens") is not None and generated_tokens.shape[-1] < (
gen_kwargs["max_new_tokens"] + 1
):
generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_new_tokens"] + 1)
loss = None
if self.args.prediction_loss_only:
return (loss, None, None)
if has_labels:
labels = inputs["labels"]
if gen_kwargs.get("max_length") is not None and labels.shape[-1] < gen_kwargs["max_length"]:
labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
elif gen_kwargs.get("max_new_tokens") is not None and labels.shape[-1] < (
gen_kwargs["max_new_tokens"] + 1
):
labels = self._pad_tensors_to_max_len(labels, (gen_kwargs["max_new_tokens"] + 1))
else:
labels = None
return (loss, generated_tokens, labels)
def _pad_tensors_to_max_len(self, tensor, max_length):
if self.tokenizer is not None and hasattr(self.tokenizer, "pad_token_id"):
# If PAD token is not defined at least EOS token has to be defined
pad_token_id = (
self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id
)
else:
if self.model.config.pad_token_id is not None:
pad_token_id = self.model.config.pad_token_id
else:
raise ValueError("Pad_token_id must be set in the configuration of the model, in order to pad tensors")
padded_tensor = pad_token_id * torch.ones(
(tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
)
padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor | --- +++ @@ -35,6 +35,37 @@ metric_key_prefix: str = "eval",
**gen_kwargs
) -> Dict[str, float]:
+ """
+ Run evaluation and return metrics.
+
+ The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
+ (pass it to the init `compute_metrics` argument).
+
+ You can also subclass and override this method to inject custom behavior.
+
+ Args:
+ eval_dataset (`Dataset`, *optional*):
+ Pass a dataset if you wish to override `self.eval_dataset`. If it is an [`~datasets.Dataset`], columns
+ not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
+ method.
+ ignore_keys (`List[str]`, *optional*):
+ A list of keys in the output of your model (if it is a dictionary) that should be ignored when
+ gathering predictions.
+ metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
+ An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
+ "eval_bleu" if the prefix is `"eval"` (default)
+ max_length (`int`, *optional*):
+ The maximum target length to use when predicting with the generate method.
+ num_beams (`int`, *optional*):
+ Number of beams for beam search that will be used when predicting with the generate method. 1 means no
+ beam search.
+ gen_kwargs:
+ Additional `generate` specific kwargs.
+
+ Returns:
+ A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
+ dictionary also contains the epoch number which comes from the training state.
+ """
gen_kwargs = gen_kwargs.copy()
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
@@ -53,6 +84,45 @@ metric_key_prefix: str = "test",
**gen_kwargs
) -> PredictionOutput:
+ """
+ Run prediction and return predictions and potential metrics.
+
+ Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
+ will also return metrics, like in `evaluate()`.
+
+ Args:
+ test_dataset (`Dataset`):
+ Dataset to run the predictions on. If it is a [`~datasets.Dataset`], columns not accepted by the
+ `model.forward()` method are automatically removed. Has to implement the method `__len__`
+ ignore_keys (`List[str]`, *optional*):
+ A list of keys in the output of your model (if it is a dictionary) that should be ignored when
+ gathering predictions.
+ metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
+ An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
+ "eval_bleu" if the prefix is `"eval"` (default)
+ max_length (`int`, *optional*):
+ The maximum target length to use when predicting with the generate method.
+ num_beams (`int`, *optional*):
+ Number of beams for beam search that will be used when predicting with the generate method. 1 means no
+ beam search.
+ gen_kwargs:
+ Additional `generate` specific kwargs.
+
+ <Tip>
+
+ If your predictions or labels have different sequence lengths (for instance because you're doing dynamic
+ padding in a token classification task) the predictions will be padded (on the right) to allow for
+ concatenation into one array. The padding index is -100.
+
+ </Tip>
+
+ Returns: *NamedTuple* A namedtuple with the following keys:
+
+ - predictions (`np.ndarray`): The predictions on `test_dataset`.
+ - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
+ - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
+ labels).
+ """
gen_kwargs = gen_kwargs.copy()
if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None:
@@ -72,6 +142,26 @@ prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
+ """
+ Perform an evaluation step on `model` using `inputs`.
+
+ Subclass and override to inject custom behavior.
+
+ Args:
+ model (`nn.Module`):
+ The model to evaluate.
+ inputs (`Dict[str, Union[torch.Tensor, Any]]`):
+ The inputs and targets of the model.
+
+ The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
+ argument `labels`. Check your model's documentation for all accepted arguments.
+ prediction_loss_only (`bool`):
+ Whether or not to return the loss only.
+
+ Return:
+ Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
+ labels (each being optional).
+ """
if not self.args.predict_with_generate or prediction_loss_only:
return super().prediction_step(
@@ -154,4 +244,4 @@ (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
)
padded_tensor[:, : tensor.shape[-1]] = tensor
- return padded_tensor
+ return padded_tensor
| https://raw.githubusercontent.com/zai-org/ChatGLM-6B/HEAD/ptuning/trainer_seq2seq.py |
Generate docstrings with examples | from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
ptuning_checkpoint: str = field(
default=None, metadata={"help": "Path to p-tuning v2 checkpoints"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},
)
resize_position_embeddings: Optional[bool] = field(
default=None,
metadata={
"help": (
"Whether to automatically resize the position embeddings if `max_source_length` exceeds "
"the model's position embeddings."
)
},
)
quantization_bit: Optional[int] = field(
default=None
)
pre_seq_len: Optional[int] = field(
default=None
)
prefix_projection: bool = field(
default=False
)
@dataclass
class DataTrainingArguments:
lang: Optional[str] = field(default=None, metadata={"help": "Language id for summarization."})
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
prompt_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
response_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."},
)
history_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the history of chat."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": (
"An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
)
},
)
test_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
},
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": (
"Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
)
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
source_prefix: Optional[str] = field(
default="", metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
forced_bos_token: Optional[str] = field(
default=None,
metadata={
"help": (
"The token to force as the first generated token after the decoder_start_token_id."
"Useful for multilingual models like mBART where the first generated token"
"needs to be the target language token (Usually it is the target language token)"
)
},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None and self.test_file is None:
raise ValueError("Need either a dataset name or a training/validation/test file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
| --- +++ @@ -4,6 +4,9 @@
@dataclass
class ModelArguments:
+ """
+ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
+ """
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
@@ -60,6 +63,9 @@
@dataclass
class DataTrainingArguments:
+ """
+ Arguments pertaining to what data we are going to input our model for training and eval.
+ """
lang: Optional[str] = field(default=None, metadata={"help": "Language id for summarization."})
@@ -215,3 +221,4 @@ assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
+
| https://raw.githubusercontent.com/zai-org/ChatGLM-6B/HEAD/ptuning/arguments.py |
Add docstrings for better understanding | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import functools
import glob
import inspect
import math
import os
import random
import re
import shutil
import sys
import time
import warnings
from collections.abc import Mapping
from distutils.util import strtobool
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from tqdm.auto import tqdm
# Integrations must be imported before ML frameworks:
# isort: off
from transformers.integrations import (
default_hp_search_backend,
get_reporting_integration_callbacks,
hp_params,
is_fairscale_available,
is_optuna_available,
is_ray_tune_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
# isort: on
import numpy as np
import torch
import torch.distributed as dist
from huggingface_hub import Repository, create_repo
from packaging import version
from torch import nn
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from transformers import __version__
from transformers.configuration_utils import PretrainedConfig
from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator
from transformers.debug_utils import DebugOption, DebugUnderflowOverflow
from transformers.deepspeed import deepspeed_init, is_deepspeed_zero3_enabled
from transformers.dependency_versions_check import dep_version_check
from transformers.modelcard import TrainingSummary
from transformers.modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model
from transformers.models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.optimization import Adafactor, get_scheduler
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_10, is_torch_less_than_1_11
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_callback import (
CallbackHandler,
DefaultFlowCallback,
PrinterCallback,
ProgressCallback,
TrainerCallback,
TrainerControl,
TrainerState,
)
from transformers.trainer_pt_utils import (
DistributedLengthGroupedSampler,
DistributedSamplerWithLoop,
DistributedTensorGatherer,
IterableDatasetShard,
LabelSmoother,
LengthGroupedSampler,
SequentialDistributedSampler,
ShardSampler,
distributed_broadcast_scalars,
distributed_concat,
find_batch_size,
get_module_class_from_name,
get_parameter_names,
nested_concat,
nested_detach,
nested_numpify,
nested_truncate,
nested_xla_mesh_reduce,
reissue_pt_warnings,
)
from transformers.trainer_utils import (
PREFIX_CHECKPOINT_DIR,
BestRun,
EvalLoopOutput,
EvalPrediction,
FSDPOption,
HPSearchBackend,
HubStrategy,
IntervalStrategy,
PredictionOutput,
RemoveColumnsCollator,
ShardedDDPOption,
TrainerMemoryTracker,
TrainOutput,
default_compute_objective,
default_hp_space,
denumpify_detensorize,
enable_full_determinism,
find_executable_batch_size,
get_last_checkpoint,
has_length,
number_of_arguments,
seed_worker,
set_seed,
speed_metrics,
)
from transformers.training_args import OptimizerNames, ParallelMode, TrainingArguments
from transformers.utils import (
CONFIG_NAME,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
can_return_loss,
find_labels,
get_full_repo_name,
is_accelerate_available,
is_apex_available,
is_datasets_available,
is_in_notebook,
is_ipex_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_torch_compile_available,
is_torch_neuroncore_available,
is_torch_tpu_available,
logging,
)
from transformers.utils.generic import ContextManagers
_is_native_cpu_amp_available = is_torch_greater_or_equal_than_1_10
DEFAULT_CALLBACKS = [DefaultFlowCallback]
DEFAULT_PROGRESS_CALLBACK = ProgressCallback
if is_in_notebook():
from transformers.utils.notebook import NotebookProgressCallback
DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback
if is_apex_available():
from apex import amp
if is_datasets_available():
import datasets
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
if is_fairscale_available():
dep_version_check("fairscale")
import fairscale
from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP
from fairscale.nn.wrap import auto_wrap
from fairscale.optim import OSS
from fairscale.optim.grad_scaler import ShardedGradScaler
if is_sagemaker_mp_enabled():
import smdistributed.modelparallel.torch as smp
from smdistributed.modelparallel import __version__ as SMP_VERSION
IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10")
from transformers.trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat
else:
IS_SAGEMAKER_MP_POST_1_10 = False
skip_first_batches = None
if is_accelerate_available():
from accelerate import __version__ as accelerate_version
if version.parse(accelerate_version) >= version.parse("0.16"):
from accelerate import skip_first_batches
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
# Name of the files used for checkpointing
TRAINING_ARGS_NAME = "training_args.bin"
TRAINER_STATE_NAME = "trainer_state.json"
OPTIMIZER_NAME = "optimizer.pt"
SCHEDULER_NAME = "scheduler.pt"
SCALER_NAME = "scaler.pt"
class Trainer:
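"""
Simple but feature-complete training and evaluation loop for PyTorch models.

This class mirrors the `Trainer` from the Hugging Face `transformers` library, with one
addition for P-Tuning: the `save_prefixencoder` flag, which marks that the prefix encoder
should be saved when writing checkpoints.
"""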
from transformers.trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
def __init__(
self,
model: Union[PreTrainedModel, nn.Module] = None,
args: TrainingArguments = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Dataset] = None,
eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None,
tokenizer: Optional[PreTrainedTokenizerBase] = None,
model_init: Optional[Callable[[], PreTrainedModel]] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
save_prefixencoder: bool = False,
):
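"""
Set up the trainer: seeds, device placement, data collator, mixed-precision backends,
distributed wrappers (Sharded DDP / FSDP), callbacks and internal state.

All arguments match `transformers.Trainer`; the extra `save_prefixencoder` flag marks
whether the prefix encoder (P-Tuning) should be saved when checkpointing.
"""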
self.save_prefixencoder = save_prefixencoder
if args is None:
output_dir = "tmp_trainer"
logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.")
args = TrainingArguments(output_dir=output_dir)
self.args = args
# Seed must be set before instantiating the model when using model
enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
self.hp_name = None
self.deepspeed = None
self.is_in_train = False
# memory metrics - must set up as early as possible
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# set the correct log level depending on the node
log_level = args.get_process_log_level()
logging.set_verbosity(log_level)
# force device and distributed setup init explicitly
args._setup_devices
if model is None:
if model_init is not None:
self.model_init = model_init
model = self.call_model_init()
else:
raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument")
else:
if model_init is not None:
warnings.warn(
"`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will"
" overwrite your model when calling the `train` method. This will become a fatal error in the next"
" release.",
FutureWarning,
)
self.model_init = model_init
if model.__class__.__name__ in MODEL_MAPPING_NAMES:
raise ValueError(
f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only "
"computes hidden states and does not accept any labels. You should choose a model with a head "
"suitable for your task like any of the `AutoModelForXxx` listed at "
"https://huggingface.co/docs/transformers/model_doc/auto."
)
if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel:
self.is_model_parallel = True
else:
self.is_model_parallel = False
# At this stage the model is already loaded
if getattr(model, "is_loaded_in_8bit", False):
if getattr(model, "_is_int8_training_enabled", False):
logger.info(
"The model is loaded in 8-bit precision. To train this model you need to add additional modules"
" inside the model such as adapters using `peft` library and freeze the model weights. Please"
" check "
" the examples in https://github.com/huggingface/peft for more details."
)
else:
raise ValueError(
"The model you want to train is loaded in 8-bit precision. if you want to fine-tune an 8-bit"
" model, please make sure that you have installed `bitsandbytes>=0.37.0`. "
)
# Setup Sharded DDP training
self.sharded_ddp = None
if len(args.sharded_ddp) > 0:
if args.deepspeed:
raise ValueError(
"Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if len(args.fsdp) > 0:
raise ValueError(
"Using --sharded_ddp xxx together with --fsdp is not possible, deactivate one of those flags."
)
if args.local_rank == -1:
raise ValueError("Using sharded DDP only works in distributed training.")
elif not is_fairscale_available():
raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.")
elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None:
raise ImportError(
"Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found "
f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`."
)
elif ShardedDDPOption.SIMPLE in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.SIMPLE
elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_2
elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp:
self.sharded_ddp = ShardedDDPOption.ZERO_DP_3
self.fsdp = None
if len(args.fsdp) > 0:
if args.deepspeed:
raise ValueError(
"Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags."
)
if not args.fsdp_config["xla"] and args.local_rank == -1:
raise ValueError("Using fsdp only works in distributed training.")
# dep_version_check("torch>=1.12.0")
# Would have to update setup.py with torch>=1.12.0
# which isn't ideally given that it will force people not using FSDP to also use torch>=1.12.0
# below is the current alternative.
if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.12.0"):
raise ValueError("FSDP requires PyTorch >= 1.12.0")
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, ShardingStrategy
if FSDPOption.FULL_SHARD in args.fsdp:
self.fsdp = ShardingStrategy.FULL_SHARD
elif FSDPOption.SHARD_GRAD_OP in args.fsdp:
self.fsdp = ShardingStrategy.SHARD_GRAD_OP
elif FSDPOption.NO_SHARD in args.fsdp:
self.fsdp = ShardingStrategy.NO_SHARD
self.backward_prefetch = BackwardPrefetch.BACKWARD_PRE
if "backward_prefetch" in self.args.fsdp_config and "backward_pos" not in self.backward_prefetch:
self.backward_prefetch = BackwardPrefetch.BACKWARD_POST
self.forword_prefetch = False
if self.args.fsdp_config.get("forword_prefect", False):
self.forword_prefetch = True
self.limit_all_gathers = False
if self.args.fsdp_config.get("limit_all_gathers", False):
self.limit_all_gathers = True
# one place to sort out whether to place the model on device or not
# postpone switching model to cuda when:
# 1. MP - since we are trying to fit a much bigger than 1 gpu model
# 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway,
# and we only use deepspeed for training at the moment
# 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first
# 4. Sharded DDP - same as MP
# 5. FSDP - same as MP
self.place_model_on_device = args.place_model_on_device
if (
self.is_model_parallel
or args.deepspeed
or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train)
or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3])
or (self.fsdp is not None)
):
self.place_model_on_device = False
default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)
self.data_collator = data_collator if data_collator is not None else default_collator
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.tokenizer = tokenizer
if self.place_model_on_device and not getattr(model, "is_loaded_in_8bit", False):
self._move_model_to_device(model, args.device)
# Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
if self.is_model_parallel:
self.args._n_gpu = 1
# later use `self.model is self.model_wrapped` to check if it's wrapped or not
self.model_wrapped = model
self.model = model
self.compute_metrics = compute_metrics
self.preprocess_logits_for_metrics = preprocess_logits_for_metrics
self.optimizer, self.lr_scheduler = optimizers
if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):
raise RuntimeError(
"Passing a `model_init` is incompatible with providing the `optimizers` argument. "
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
if is_torch_tpu_available() and self.optimizer is not None:
for param in self.model.parameters():
model_device = param.device
break
for param_group in self.optimizer.param_groups:
if len(param_group["params"]) > 0:
optimizer_device = param_group["params"][0].device
break
if model_device != optimizer_device:
raise ValueError(
"The model and the optimizer parameters are not on the same device, which probably means you"
" created an optimizer around your model **before** putting on the device and passing it to the"
" `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and"
" `model.to(xm.xla_device())` is performed before the optimizer creation in your script."
)
if ((self.sharded_ddp is not None) or args.deepspeed or (self.fsdp is not None)) and (
self.optimizer is not None or self.lr_scheduler is not None
):
raise RuntimeError(
"Passing `optimizers` is not allowed if Fairscale, Deepspeed or PyTorch FSDP is enabled."
"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method."
)
default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)
callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler
)
self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)
# Will be set to True by `self._setup_loggers()` on first call to `self.log()`.
self._loggers_initialized = False
# Create clone of distant repo and output directory if needed
if self.args.push_to_hub:
self.init_git_repo(at_init=True)
# In case of pull, we need to make sure every process has the latest.
if is_torch_tpu_available():
xm.rendezvous("init git repo")
elif args.local_rank != -1:
dist.barrier()
if self.args.should_save:
os.makedirs(self.args.output_dir, exist_ok=True)
if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)):
raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).")
if args.max_steps > 0:
logger.info("max_steps is given, it will override any value given in num_train_epochs")
if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0:
raise ValueError("train_dataset does not implement __len__, max_steps has to be specified")
if (
train_dataset is not None
and isinstance(train_dataset, torch.utils.data.IterableDataset)
and args.group_by_length
):
raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset")
self._signature_columns = None
# Mixed precision setup
self.use_apex = False
self.use_cuda_amp = False
self.use_cpu_amp = False
# Mixed precision setup for SageMaker Model Parallel
if is_sagemaker_mp_enabled():
# BF16 + model parallelism in SageMaker: currently not supported, raise an error
if args.bf16:
raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead ")
if IS_SAGEMAKER_MP_POST_1_10:
# When there's mismatch between SMP config and trainer argument, use SMP config as truth
if args.fp16 != smp.state.cfg.fp16:
logger.warning(
f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16},"
f"but FP16 provided in trainer argument is {args.fp16},"
f"setting to {smp.state.cfg.fp16}"
)
args.fp16 = smp.state.cfg.fp16
else:
# smp < 1.10 does not support fp16 in trainer.
if hasattr(smp.state.cfg, "fp16"):
logger.warning(
f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, "
"but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer."
)
if args.fp16 or args.bf16:
if args.half_precision_backend == "auto":
if args.device == torch.device("cpu"):
if args.fp16:
raise ValueError("Tried to use `fp16` but it is not supported on cpu")
elif _is_native_cpu_amp_available:
args.half_precision_backend = "cpu_amp"
else:
raise ValueError("Tried to use cpu amp but native cpu amp is not available")
else:
args.half_precision_backend = "cuda_amp"
logger.info(f"Using {args.half_precision_backend} half precision backend")
self.do_grad_scaling = False
if (args.fp16 or args.bf16) and not (args.deepspeed or is_sagemaker_mp_enabled() or is_torch_tpu_available()):
# deepspeed and SageMaker Model Parallel manage their own half precision
if args.half_precision_backend == "cuda_amp":
self.use_cuda_amp = True
self.amp_dtype = torch.float16 if args.fp16 else torch.bfloat16
# bf16 does not need grad scaling
self.do_grad_scaling = self.amp_dtype == torch.float16
if self.do_grad_scaling:
if self.sharded_ddp is not None:
self.scaler = ShardedGradScaler()
elif self.fsdp is not None:
from torch.distributed.fsdp.sharded_grad_scaler import (
ShardedGradScaler as FSDPShardedGradScaler,
)
self.scaler = FSDPShardedGradScaler()
elif is_torch_tpu_available():
from torch_xla.amp import GradScaler
self.scaler = GradScaler()
else:
self.scaler = torch.cuda.amp.GradScaler()
elif args.half_precision_backend == "cpu_amp":
self.use_cpu_amp = True
self.amp_dtype = torch.bfloat16
else:
if not is_apex_available():
raise ImportError(
"Using FP16 with APEX but APEX is not installed, please refer to"
" https://www.github.com/nvidia/apex."
)
self.use_apex = True
# FP16 + model parallelism in SageMaker: gradient clipping does not work for now so we raise a helpful error.
if (
is_sagemaker_mp_enabled()
and self.use_cuda_amp
and args.max_grad_norm is not None
and args.max_grad_norm > 0
):
raise ValueError(
"SageMaker Model Parallelism in mixed precision mode does not support gradient clipping yet. Pass "
"along 'max_grad_norm': 0 in your hyperparameters."
)
# Label smoothing
if self.args.label_smoothing_factor != 0:
self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)
else:
self.label_smoother = None
self.state = TrainerState(
is_local_process_zero=self.is_local_process_zero(),
is_world_process_zero=self.is_world_process_zero(),
)
self.control = TrainerControl()
# Internal variable to count flos in each process, will be accumulated in `self.state.total_flos` then
# returned to 0 every time flos need to be logged
self.current_flos = 0
self.hp_search_backend = None
self.use_tune_checkpoints = False
default_label_names = find_labels(self.model.__class__)
self.label_names = default_label_names if self.args.label_names is None else self.args.label_names
self.can_return_loss = can_return_loss(self.model.__class__)
self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)
# Internal variables to keep track of the original batch size
self._train_batch_size = args.train_batch_size
# very last
self._memory_tracker.stop_and_update_metrics()
# torch.compile
if args.torch_compile and not is_torch_compile_available():
raise RuntimeError("Using torch.compile requires PyTorch 2.0 or higher.")
def add_callback(self, callback):
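"""Add a `TrainerCallback` (class or instance) to the current list of callbacks."""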
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
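"""Remove a callback from the current list of `TrainerCallback` and return it (or `None` if not found)."""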
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
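"""Remove a callback from the current list of `TrainerCallback`."""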
self.callback_handler.remove_callback(callback)
def _move_model_to_device(self, model, device):
model = model.to(device)
# Moving a model to an XLA device disconnects the tied weights, so we have to retie them.
if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"):
model.tie_weights()
def _set_signature_columns_if_needed(self):
if self._signature_columns is None:
# Inspect model forward signature to keep only the arguments it accepts.
signature = inspect.signature(self.model.forward)
self._signature_columns = list(signature.parameters.keys())
# Labels may be named label or label_ids, the default data collator handles that.
self._signature_columns += list(set(["label", "label_ids"] + self.label_names))
def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
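"""Drop dataset columns that are not accepted by `model.forward` (no-op unless `args.remove_unused_columns` is set)."""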
if not self.args.remove_unused_columns:
return dataset
self._set_signature_columns_if_needed()
signature_columns = self._signature_columns
ignored_columns = list(set(dataset.column_names) - set(signature_columns))
if len(ignored_columns) > 0:
dset_description = "" if description is None else f"in the {description} set"
logger.info(
f"The following columns {dset_description} don't have a corresponding argument in "
f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
f" If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, "
" you can safely ignore this message."
)
columns = [k for k in signature_columns if k in dataset.column_names]
if version.parse(datasets.__version__) < version.parse("1.4.0"):
dataset.set_format(
type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
)
return dataset
else:
return dataset.remove_columns(ignored_columns)
def _get_collator_with_removed_columns(
self, data_collator: Callable, description: Optional[str] = None
) -> Callable:
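"""Wrap the data collator in a `RemoveColumnsCollator` that strips inputs the model's `forward` does not accept."""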
if not self.args.remove_unused_columns:
return data_collator
self._set_signature_columns_if_needed()
signature_columns = self._signature_columns
remove_columns_collator = RemoveColumnsCollator(
data_collator=data_collator,
signature_columns=signature_columns,
logger=logger,
description=description,
model_name=self.model.__class__.__name__,
)
return remove_columns_collator
def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
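"""Build the sampler for the training dataloader: length-grouped, random or distributed depending on the training arguments."""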
if self.train_dataset is None or not has_length(self.train_dataset):
return None
generator = None
if self.args.world_size <= 1:
generator = torch.Generator()
# for backwards compatibility, we generate a seed here (which is sampled from a generator seeded with
# `args.seed`) if data_seed isn't provided.
# Further on in this method, we default to `args.seed` instead.
if self.args.data_seed is None:
seed = int(torch.empty((), dtype=torch.int64).random_().item())
else:
seed = self.args.data_seed
generator.manual_seed(seed)
seed = self.args.data_seed if self.args.data_seed is not None else self.args.seed
# Build the sampler.
if self.args.group_by_length:
if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
lengths = (
self.train_dataset[self.args.length_column_name]
if self.args.length_column_name in self.train_dataset.column_names
else None
)
else:
lengths = None
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.args.train_batch_size * self.args.gradient_accumulation_steps,
dataset=self.train_dataset,
lengths=lengths,
model_input_name=model_input_name,
generator=generator,
)
else:
return DistributedLengthGroupedSampler(
self.args.train_batch_size * self.args.gradient_accumulation_steps,
dataset=self.train_dataset,
num_replicas=self.args.world_size,
rank=self.args.process_index,
lengths=lengths,
model_input_name=model_input_name,
seed=seed,
)
else:
if self.args.world_size <= 1:
return RandomSampler(self.train_dataset, generator=generator)
elif (
self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
and not self.args.dataloader_drop_last
):
# Use a loop for TPUs when drop_last is False to have all batches have the same size.
return DistributedSamplerWithLoop(
self.train_dataset,
batch_size=self.args.per_device_train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=seed,
)
else:
return DistributedSampler(
self.train_dataset,
num_replicas=self.args.world_size,
rank=self.args.process_index,
seed=seed,
)
def get_train_dataloader(self) -> DataLoader:
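"""
Return the training `DataLoader`.

Uses the training sampler and the data collator; iterable datasets are sharded across
processes (`IterableDatasetShard`) when running distributed training.
"""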
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_dataset = self.train_dataset
data_collator = self.data_collator
if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
train_dataset = self._remove_unused_columns(train_dataset, description="training")
else:
data_collator = self._get_collator_with_removed_columns(data_collator, description="training")
if isinstance(train_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
train_dataset = IterableDatasetShard(
train_dataset,
batch_size=self._train_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
train_dataset,
batch_size=self._train_batch_size,
collate_fn=data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
train_sampler = self._get_train_sampler()
return DataLoader(
train_dataset,
batch_size=self._train_batch_size,
sampler=train_sampler,
collate_fn=data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
worker_init_fn=seed_worker,
)
def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]:
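"""Build the sampler used for evaluation and prediction dataloaders."""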
# Deprecated code
if self.args.use_legacy_prediction_loop:
if is_torch_tpu_available():
return SequentialDistributedSampler(
eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
)
elif is_sagemaker_mp_enabled():
return SequentialDistributedSampler(
eval_dataset,
num_replicas=smp.dp_size(),
rank=smp.dp_rank(),
batch_size=self.args.per_device_eval_batch_size,
)
elif self.args.local_rank != -1:
return SequentialDistributedSampler(eval_dataset)
else:
return SequentialSampler(eval_dataset)
if self.args.world_size <= 1:
return SequentialSampler(eval_dataset)
else:
return ShardSampler(
eval_dataset,
batch_size=self.args.per_device_eval_batch_size,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
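"""Return the evaluation `DataLoader` for `eval_dataset`, falling back to `self.eval_dataset` when not provided."""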
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
data_collator = self.data_collator
if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation")
else:
data_collator = self._get_collator_with_removed_columns(data_collator, description="evaluation")
if isinstance(eval_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
eval_dataset = IterableDatasetShard(
eval_dataset,
batch_size=self.args.per_device_eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
eval_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
eval_sampler = self._get_eval_sampler(eval_dataset)
return DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
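"""Return the test `DataLoader` for `test_dataset`, using the evaluation batch size."""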
data_collator = self.data_collator
if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
test_dataset = self._remove_unused_columns(test_dataset, description="test")
else:
data_collator = self._get_collator_with_removed_columns(data_collator, description="test")
if isinstance(test_dataset, torch.utils.data.IterableDataset):
if self.args.world_size > 1:
test_dataset = IterableDatasetShard(
test_dataset,
batch_size=self.args.eval_batch_size,
drop_last=self.args.dataloader_drop_last,
num_processes=self.args.world_size,
process_index=self.args.process_index,
)
return DataLoader(
test_dataset,
batch_size=self.args.eval_batch_size,
collate_fn=data_collator,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
test_sampler = self._get_eval_sampler(test_dataset)
# We use the same batch_size as for eval.
return DataLoader(
test_dataset,
sampler=test_sampler,
batch_size=self.args.eval_batch_size,
collate_fn=data_collator,
drop_last=self.args.dataloader_drop_last,
num_workers=self.args.dataloader_num_workers,
pin_memory=self.args.dataloader_pin_memory,
)
def create_optimizer_and_scheduler(self, num_training_steps: int):
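"""
Set up the optimizer and the learning rate scheduler.

Override this method (or `create_optimizer` / `create_scheduler`) in a subclass to
customize either of them.
"""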
self.create_optimizer()
if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16:
# If smp >= 1.10 and fp16 is enabled, we unwrap the optimizer
optimizer = self.optimizer.optimizer
else:
optimizer = self.optimizer
self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
def create_optimizer(self):
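"""
Set up the optimizer.

Parameters are split into a weight-decay group and a no-decay group (biases and
LayerNorm weights); the optimizer class and kwargs come from `get_optimizer_cls_and_kwargs`.
"""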
opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
if self.optimizer is None:
decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)
],
"weight_decay": self.args.weight_decay,
},
{
"params": [
p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
],
"weight_decay": 0.0,
},
]
optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer = OSS(
params=optimizer_grouped_parameters,
optim=optimizer_cls,
**optimizer_kwargs,
)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if optimizer_cls.__name__ == "Adam8bit":
import bitsandbytes
manager = bitsandbytes.optim.GlobalOptimManager.get_instance()
skipped = 0
for module in opt_model.modules():
if isinstance(module, nn.Embedding):
skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
print(f"skipped {module}: {skipped/2**20}M params")
manager.register_module_override(module, "weight", {"optim_bits": 32})
logger.debug(f"bitsandbytes: will optimize {module} in fp32")
print(f"skipped: {skipped/2**20}M params")
if is_sagemaker_mp_enabled():
self.optimizer = smp.DistributedOptimizer(self.optimizer)
return self.optimizer
@staticmethod
def get_optimizer_cls_and_kwargs(args: TrainingArguments) -> Tuple[Any, Any]:
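"""Return the optimizer class and keyword arguments selected by `TrainingArguments.optim`."""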
# parse args.optim_args
optim_args = {}
if args.optim_args:
for mapping in args.optim_args.replace(" ", "").split(","):
key, value = mapping.split("=")
optim_args[key] = value
optimizer_kwargs = {"lr": args.learning_rate}
adam_kwargs = {
"betas": (args.adam_beta1, args.adam_beta2),
"eps": args.adam_epsilon,
}
if args.optim == OptimizerNames.ADAFACTOR:
optimizer_cls = Adafactor
optimizer_kwargs.update({"scale_parameter": False, "relative_step": False})
elif args.optim == OptimizerNames.ADAMW_HF:
from transformers.optimization import AdamW
optimizer_cls = AdamW
optimizer_kwargs.update(adam_kwargs)
elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]:
from torch.optim import AdamW
optimizer_cls = AdamW
optimizer_kwargs.update(adam_kwargs)
if args.optim == OptimizerNames.ADAMW_TORCH_FUSED:
optimizer_kwargs.update({"fused": True})
elif args.optim == OptimizerNames.ADAMW_TORCH_XLA:
try:
from torch_xla.amp.syncfree import AdamW
optimizer_cls = AdamW
optimizer_kwargs.update(adam_kwargs)
except ImportError:
raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.")
elif args.optim == OptimizerNames.ADAMW_APEX_FUSED:
try:
from apex.optimizers import FusedAdam
optimizer_cls = FusedAdam
optimizer_kwargs.update(adam_kwargs)
except ImportError:
raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!")
elif args.optim == OptimizerNames.ADAMW_BNB:
try:
from bitsandbytes.optim import Adam8bit
optimizer_cls = Adam8bit
optimizer_kwargs.update(adam_kwargs)
except ImportError:
raise ValueError("Trainer tried to instantiate bnb Adam8bit but bnb is not installed!")
elif args.optim == OptimizerNames.ADAMW_ANYPRECISION:
try:
from torchdistx.optimizers import AnyPrecisionAdamW
optimizer_cls = AnyPrecisionAdamW
optimizer_kwargs.update(adam_kwargs)
# TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx.
optimizer_kwargs.update(
{
"use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")),
"momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")),
"variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")),
"compensation_buffer_dtype": getattr(
torch, optim_args.get("compensation_buffer_dtype", "bfloat16")
),
}
)
except ImportError:
raise ValueError("Please install https://github.com/pytorch/torchdistx")
elif args.optim == OptimizerNames.SGD:
optimizer_cls = torch.optim.SGD
elif args.optim == OptimizerNames.ADAGRAD:
optimizer_cls = torch.optim.Adagrad
else:
raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}")
return optimizer_cls, optimizer_kwargs
def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
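"""Set up the learning rate scheduler; the optimizer must already exist (or be passed in) when this is called."""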
if self.lr_scheduler is None:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
optimizer=self.optimizer if optimizer is None else optimizer,
num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
)
return self.lr_scheduler
def num_examples(self, dataloader: DataLoader) -> int:
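"""Return the number of samples in a dataloader's dataset, or an estimate from the dataloader length and batch size when the dataset has no `__len__`."""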
try:
dataset = dataloader.dataset
# Special case for IterableDatasetShard, we need to dig deeper
if isinstance(dataset, IterableDatasetShard):
return len(dataloader.dataset.dataset)
return len(dataloader.dataset)
except (NameError, AttributeError, TypeError): # no dataset or length, estimate by length of dataloader
return len(dataloader) * self.args.per_device_train_batch_size
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
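"""Copy the hyperparameters suggested by the current trial (Optuna / Ray Tune / SigOpt / W&B) into `self.args`."""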
self._trial = trial
if self.hp_search_backend is None or trial is None:
return
if self.hp_search_backend == HPSearchBackend.OPTUNA:
params = self.hp_space(trial)
elif self.hp_search_backend == HPSearchBackend.RAY:
params = trial
params.pop("wandb", None)
elif self.hp_search_backend == HPSearchBackend.SIGOPT:
params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()}
elif self.hp_search_backend == HPSearchBackend.WANDB:
params = trial
for key, value in params.items():
if not hasattr(self.args, key):
logger.warning(
f"Trying to set {key} in the hyperparameter search but there is no corresponding field in"
" `TrainingArguments`."
)
continue
old_attr = getattr(self.args, key, None)
# Casting value to the proper type
if old_attr is not None:
value = type(old_attr)(value)
setattr(self.args, key, value)
if self.hp_search_backend == HPSearchBackend.OPTUNA:
logger.info(f"Trial: {trial.params}")
if self.hp_search_backend == HPSearchBackend.SIGOPT:
logger.info(f"SigOpt Assignments: {trial.assignments}")
if self.hp_search_backend == HPSearchBackend.WANDB:
logger.info(f"W&B Sweep parameters: {trial}")
if self.args.deepspeed:
# Rebuild the deepspeed config to reflect the updated training parameters
from transformers.deepspeed import HfTrainerDeepSpeedConfig
self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed)
self.args.hf_deepspeed_config.trainer_config_process(self.args)
def _report_to_hp_search(self, trial: Union["optuna.Trial", Dict[str, Any]], step: int, metrics: Dict[str, float]):
if self.hp_search_backend is None or trial is None:
return
self.objective = self.compute_objective(metrics.copy())
if self.hp_search_backend == HPSearchBackend.OPTUNA:
import optuna
trial.report(self.objective, step)
if trial.should_prune():
self.callback_handler.on_train_end(self.args, self.state, self.control)
raise optuna.TrialPruned()
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
if self.control.should_save:
self._tune_save_checkpoint()
tune.report(objective=self.objective, **metrics)
def _tune_save_checkpoint(self):
from ray import tune
if not self.use_tune_checkpoints:
return
with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
self.save_model(output_dir, _internal_call=True)
if self.args.should_save:
self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
def call_model_init(self, trial=None):
model_init_argcount = number_of_arguments(self.model_init)
if model_init_argcount == 0:
model = self.model_init()
elif model_init_argcount == 1:
model = self.model_init(trial)
else:
raise RuntimeError("model_init should have 0 or 1 argument.")
if model is None:
raise RuntimeError("model_init should not return None.")
return model
def torch_jit_model_eval(self, model, dataloader, training=False):
if not training:
if dataloader is None:
logger.warning("failed to use PyTorch jit mode due to current dataloader is none.")
return model
example_batch = next(iter(dataloader))
example_batch = self._prepare_inputs(example_batch)
try:
jit_model = model.eval()
with ContextManagers([self.autocast_smart_context_manager(cache_enabled=False), torch.no_grad()]):
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.14.0"):
if isinstance(example_batch, dict):
jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False)
else:
jit_model = torch.jit.trace(
jit_model,
example_kwarg_inputs={key: example_batch[key] for key in example_batch},
strict=False,
)
else:
jit_inputs = []
for key in example_batch:
example_tensor = torch.ones_like(example_batch[key])
jit_inputs.append(example_tensor)
jit_inputs = tuple(jit_inputs)
jit_model = torch.jit.trace(jit_model, jit_inputs, strict=False)
jit_model = torch.jit.freeze(jit_model)
with torch.no_grad():
jit_model(**example_batch)
jit_model(**example_batch)
model = jit_model
self.use_cpu_amp = False
self.use_cuda_amp = False
except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e:
logger.warning(f"failed to use PyTorch jit mode due to: {e}.")
return model
def ipex_optimize_model(self, model, training=False, dtype=torch.float32):
if not is_ipex_available():
raise ImportError(
"Using IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer"
" to https://github.com/intel/intel-extension-for-pytorch."
)
import intel_extension_for_pytorch as ipex
if not training:
model.eval()
dtype = torch.bfloat16 if not self.is_in_train and self.args.bf16_full_eval else dtype
# conv_bn_folding is disabled as it fails in symbolic tracing, resulting in ipex warnings
model = ipex.optimize(model, dtype=dtype, level="O1", conv_bn_folding=False, inplace=not self.is_in_train)
else:
if not model.training:
model.train()
model, self.optimizer = ipex.optimize(
model, dtype=dtype, optimizer=self.optimizer, inplace=True, level="O1"
)
return model
def _wrap_model(self, model, training=True, dataloader=None):
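"""
Wrap the model according to the current setup: `torch.compile`, IPEX, SageMaker MP,
apex AMP, `nn.DataParallel`, Sharded DDP, FSDP (native or XLA) or DistributedDataParallel.

When `training=False`, the distributed wrappers are skipped and the (possibly jit-traced)
model is returned as-is.
"""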
if self.args.torch_compile:
model = torch.compile(model, backend=self.args.torch_compile_backend, mode=self.args.torch_compile_mode)
if self.args.use_ipex:
dtype = torch.bfloat16 if self.use_cpu_amp else torch.float32
model = self.ipex_optimize_model(model, training, dtype=dtype)
if is_sagemaker_mp_enabled():
# Wrapping the base model twice in a DistributedModel will raise an error.
if isinstance(self.model_wrapped, smp.model.DistributedModel):
return self.model_wrapped
return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)
# already initialized its own DDP and AMP
if self.deepspeed:
return self.deepspeed
# train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
if unwrap_model(model) is not model:
return model
# Mixed precision training with apex (torch < 1.6)
if self.use_apex and training:
model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)
# Multi-gpu training (should be after apex fp16 initialization)
if self.args.n_gpu > 1:
model = nn.DataParallel(model)
if self.args.jit_mode_eval:
start_time = time.time()
model = self.torch_jit_model_eval(model, dataloader, training)
self.jit_compilation_time = round(time.time() - start_time, 4)
# Note: in torch.distributed mode, there's no point in wrapping the model
# inside a DistributedDataParallel as we'll be under `no_grad` anyways.
if not training:
return model
# Distributed training (should be after apex fp16 initialization)
if self.sharded_ddp is not None:
# Sharded DDP!
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
model = ShardedDDP(model, self.optimizer)
else:
mixed_precision = self.args.fp16 or self.args.bf16
cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
# XXX: Breaking the self.model convention but I see no way around it for now.
if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
model = auto_wrap(model)
self.model = model = FullyShardedDDP(
model,
mixed_precision=mixed_precision,
reshard_after_forward=zero_3,
cpu_offload=cpu_offload,
).to(self.args.device)
# Distributed training using PyTorch FSDP
elif self.fsdp is not None:
if not self.args.fsdp_config["xla"]:
# PyTorch FSDP!
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, MixedPrecision
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy
if FSDPOption.OFFLOAD in self.args.fsdp:
cpu_offload = CPUOffload(offload_params=True)
else:
cpu_offload = CPUOffload(offload_params=False)
auto_wrap_policy = None
if FSDPOption.AUTO_WRAP in self.args.fsdp:
if self.args.fsdp_config["fsdp_min_num_params"] > 0:
auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["fsdp_min_num_params"]
)
elif self.args.fsdp_config.get("fsdp_transformer_layer_cls_to_wrap", None) is not None:
transformer_cls_to_wrap = set()
for layer_class in self.args.fsdp_config["fsdp_transformer_layer_cls_to_wrap"]:
transformer_cls = get_module_class_from_name(model, layer_class)
if transformer_cls is None:
raise Exception("Could not find the transformer layer class to wrap in the model.")
else:
transformer_cls_to_wrap.add(transformer_cls)
auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
# Transformer layer class to wrap
transformer_layer_cls=transformer_cls_to_wrap,
)
mixed_precision_policy = None
dtype = None
if self.args.fp16:
dtype = torch.float16
elif self.args.bf16:
dtype = torch.bfloat16
if dtype is not None:
mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
if type(model) != FSDP:
# XXX: Breaking the self.model convention but I see no way around it for now.
self.model = model = FSDP(
model,
sharding_strategy=self.fsdp,
cpu_offload=cpu_offload,
auto_wrap_policy=auto_wrap_policy,
mixed_precision=mixed_precision_policy,
device_id=self.args.device,
backward_prefetch=self.backward_prefetch,
forward_prefetch=self.forward_prefetch,
limit_all_gathers=self.limit_all_gathers,
)
else:
try:
from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP
from torch_xla.distributed.fsdp import checkpoint_module
from torch_xla.distributed.fsdp.wrap import (
size_based_auto_wrap_policy,
transformer_auto_wrap_policy,
)
except ImportError:
raise ImportError("Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.")
auto_wrap_policy = None
auto_wrapper_callable = None
if self.args.fsdp_config["fsdp_min_num_params"] > 0:
auto_wrap_policy = functools.partial(
size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["fsdp_min_num_params"]
)
elif self.args.fsdp_config.get("fsdp_transformer_layer_cls_to_wrap", None) is not None:
transformer_cls_to_wrap = set()
for layer_class in self.args.fsdp_config["fsdp_transformer_layer_cls_to_wrap"]:
transformer_cls = get_module_class_from_name(model, layer_class)
if transformer_cls is None:
raise Exception("Could not find the transformer layer class to wrap in the model.")
else:
transformer_cls_to_wrap.add(transformer_cls)
auto_wrap_policy = functools.partial(
transformer_auto_wrap_policy,
# Transformer layer class to wrap
transformer_layer_cls=transformer_cls_to_wrap,
)
fsdp_kwargs = self.args.xla_fsdp_config
if self.args.fsdp_config["xla_fsdp_grad_ckpt"]:
# Apply gradient checkpointing to auto-wrapped sub-modules if specified
def auto_wrapper_callable(m, *args, **kwargs):
return FSDP(checkpoint_module(m), *args, **kwargs)
# Wrap the base model with an outer FSDP wrapper
self.model = model = FSDP(
model,
auto_wrap_policy=auto_wrap_policy,
auto_wrapper_callable=auto_wrapper_callable,
**fsdp_kwargs,
)
# Patch `xm.optimizer_step` should not reduce gradients in this case,
# as FSDP does not need gradient reduction over sharded parameters.
def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}):
loss = optimizer.step(**optimizer_args)
if barrier:
xm.mark_step()
return loss
xm.optimizer_step = patched_optimizer_step
elif is_sagemaker_dp_enabled():
model = nn.parallel.DistributedDataParallel(
model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))]
)
elif self.args.local_rank != -1:
kwargs = {}
if self.args.ddp_find_unused_parameters is not None:
kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters
elif isinstance(model, PreTrainedModel):
# find_unused_parameters breaks checkpointing as per
# https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing
else:
kwargs["find_unused_parameters"] = True
if self.args.ddp_bucket_cap_mb is not None:
kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb
if is_torch_neuroncore_available():
return model
model = nn.parallel.DistributedDataParallel(
model,
device_ids=[self.args.local_rank] if self.args._n_gpu != 0 else None,
output_device=self.args.local_rank if self.args._n_gpu != 0 else None,
**kwargs,
)
return model
def train(
self,
resume_from_checkpoint: Optional[Union[str, bool]] = None,
trial: Union["optuna.Trial", Dict[str, Any]] = None,
ignore_keys_for_eval: Optional[List[str]] = None,
**kwargs,
):
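"""
Main training entry point.

Args:
    resume_from_checkpoint: Path to a checkpoint, or `True` to resume from the last
        checkpoint found in `args.output_dir`.
    trial: Trial run or hyperparameter dictionary when doing a hyperparameter search.
    ignore_keys_for_eval: Keys in the model outputs to ignore when gathering predictions
        for evaluation during training.
    kwargs: Kept for backward compatibility (e.g. the deprecated `model_path`).
"""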
if resume_from_checkpoint is False:
resume_from_checkpoint = None
# memory metrics - must set up as early as possible
self._memory_tracker.start()
args = self.args
self.is_in_train = True
# do_train is not a reliable argument, as it might not be set and .train() still called, so
# the following is a workaround:
if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train:
self._move_model_to_device(self.model, args.device)
if "model_path" in kwargs:
resume_from_checkpoint = kwargs.pop("model_path")
warnings.warn(
"`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
"instead.",
FutureWarning,
)
if len(kwargs) > 0:
raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
# This might change the seed so needs to run first.
self._hp_search_setup(trial)
self._train_batch_size = self.args.train_batch_size
# Model re-init
model_reloaded = False
if self.model_init is not None:
# Seed must be set before instantiating the model when using model_init.
enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
self.model = self.call_model_init(trial)
model_reloaded = True
# Reinitializes optimizer and scheduler
self.optimizer, self.lr_scheduler = None, None
# Load potential model checkpoint
if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
resume_from_checkpoint = get_last_checkpoint(args.output_dir)
if resume_from_checkpoint is None:
raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
if resume_from_checkpoint is not None and not is_sagemaker_mp_enabled() and args.deepspeed is None:
self._load_from_checkpoint(resume_from_checkpoint)
# If model was re-initialized, put it on the right device and update self.model_wrapped
if model_reloaded:
if self.place_model_on_device:
self._move_model_to_device(self.model, args.device)
self.model_wrapped = self.model
inner_training_loop = find_executable_batch_size(
self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size
)
return inner_training_loop(
args=args,
resume_from_checkpoint=resume_from_checkpoint,
trial=trial,
ignore_keys_for_eval=ignore_keys_for_eval,
)
def _inner_training_loop(
self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None
):
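"""Run the actual training loop for the given batch size (retried with smaller sizes by `find_executable_batch_size` when `auto_find_batch_size` is enabled)."""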
self._train_batch_size = batch_size
# Data loader and number of training steps
train_dataloader = self.get_train_dataloader()
# Setting up training control variables:
# number of training epochs: num_train_epochs
# number of training steps per epoch: num_update_steps_per_epoch
# total number of training steps to execute: max_steps
total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size
len_dataloader = None
if has_length(train_dataloader):
len_dataloader = len(train_dataloader)
num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
num_examples = self.num_examples(train_dataloader)
if args.max_steps > 0:
max_steps = args.max_steps
num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
args.max_steps % num_update_steps_per_epoch > 0
)
# May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
# the best we can do.
num_train_samples = args.max_steps * total_train_batch_size
else:
max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
num_train_epochs = math.ceil(args.num_train_epochs)
num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs
elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size
max_steps = args.max_steps
# Setting a very large number of epochs so we go as many times as necessary over the iterator.
num_train_epochs = sys.maxsize
num_update_steps_per_epoch = max_steps
num_examples = total_train_batch_size * args.max_steps
num_train_samples = args.max_steps * total_train_batch_size
else:
raise ValueError(
"args.max_steps must be set to a positive value if dataloader does not have a length, was"
f" {args.max_steps}"
)
if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
if self.args.n_gpu > 1:
# nn.DataParallel(model) replicates the model, creating new variables and module
# references registered here no longer work on other gpus, breaking the module
raise ValueError(
"Currently --debug underflow_overflow is not supported under DP. Please use DDP"
" (torch.distributed.launch)."
)
else:
debug_overflow = DebugUnderflowOverflow(self.model) # noqa
delay_optimizer_creation = (
self.sharded_ddp is not None
and self.sharded_ddp != ShardedDDPOption.SIMPLE
or is_sagemaker_mp_enabled()
or self.fsdp is not None
)
if args.deepspeed:
deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
elif not delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
self.state = TrainerState()
self.state.is_hyper_param_search = trial is not None
# Activate gradient checkpointing if needed
if args.gradient_checkpointing:
self.model.gradient_checkpointing_enable()
model = self._wrap_model(self.model_wrapped)
if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None:
self._load_from_checkpoint(resume_from_checkpoint, model)
# for the rest of this function `model` is the outside model, whether it was wrapped or not
if model is not self.model:
self.model_wrapped = model
if delay_optimizer_creation:
self.create_optimizer_and_scheduler(num_training_steps=max_steps)
# Check if saved optimizer or scheduler states exist
self._load_optimizer_and_scheduler(resume_from_checkpoint)
# important: at this point:
# self.model is the Transformers Model
# self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.
# Train!
logger.info("***** Running training *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {max_steps}")
logger.info(
f" Number of trainable parameters = {sum(p.numel() for p in model.parameters() if p.requires_grad)}"
)
self.state.epoch = 0
start_time = time.time()
epochs_trained = 0
steps_trained_in_current_epoch = 0
steps_trained_progress_bar = None
# Check if continuing training from a checkpoint
if resume_from_checkpoint is not None and os.path.isfile(
os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)
):
self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
epochs_trained = self.state.global_step // num_update_steps_per_epoch
if not args.ignore_data_skip:
steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
steps_trained_in_current_epoch *= args.gradient_accumulation_steps
else:
steps_trained_in_current_epoch = 0
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(f" Continuing training from epoch {epochs_trained}")
logger.info(f" Continuing training from global step {self.state.global_step}")
if not args.ignore_data_skip:
if skip_first_batches is None:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first"
f" {steps_trained_in_current_epoch} batches in the first epoch. If this takes a lot of time,"
" you can install the latest version of Accelerate with `pip install -U accelerate`.You can"
" also add the `--ignore_data_skip` flag to your launch command, but you will resume the"
" training on data already seen by your model."
)
else:
logger.info(
f" Will skip the first {epochs_trained} epochs then the first"
f" {steps_trained_in_current_epoch} batches in the first epoch."
)
if self.is_local_process_zero() and not args.disable_tqdm and skip_first_batches is None:
steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
steps_trained_progress_bar.set_description("Skipping the first batches")
# Update the references
self.callback_handler.model = self.model
self.callback_handler.optimizer = self.optimizer
self.callback_handler.lr_scheduler = self.lr_scheduler
self.callback_handler.train_dataloader = train_dataloader
if self.hp_name is not None and self._trial is not None:
# use self._trial because the SigOpt/Optuna HPO backends only call `_hp_search_setup(trial)` instead of passing the
# trial parameter to `train` when using DDP.
self.state.trial_name = self.hp_name(self._trial)
if trial is not None:
assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial
self.state.trial_params = hp_params(assignments)
else:
self.state.trial_params = None
# These should be the same if the state has been saved, but in case the training arguments changed, it's safer
# to set them after the load.
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
# tr_loss is a tensor to avoid synchronization of TPUs through .item()
tr_loss = torch.tensor(0.0).to(args.device)
# _total_loss_scalar is updated every time .item() has to be called on tr_loss and stores the sum of all losses
self._total_loss_scalar = 0.0
self._globalstep_last_logged = self.state.global_step
model.zero_grad()
self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
# Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
if not args.ignore_data_skip:
for epoch in range(epochs_trained):
is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance(
train_dataloader.sampler, RandomSampler
)
if is_torch_less_than_1_11 or not is_random_sampler:
# We just need to begin an iteration to create the randomization of the sampler.
# That was before PyTorch 1.11 however...
for _ in train_dataloader:
break
else:
# Otherwise we need to iterate through the whole sampler, because a random operation is added
# at the very end of it.
_ = list(train_dataloader.sampler)
total_batched_samples = 0
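# Main training loop: iterate over the remaining epochs, then over batches within each epoch.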
for epoch in range(epochs_trained, num_train_epochs):
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
elif hasattr(train_dataloader, "dataset") and isinstance(train_dataloader.dataset, IterableDatasetShard):
train_dataloader.dataset.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
epoch_iterator = parallel_loader
else:
epoch_iterator = train_dataloader
# Reset the past mems state at the beginning of each epoch if necessary.
if args.past_index >= 0:
self._past = None
steps_in_epoch = (
len(epoch_iterator)
if len_dataloader is not None
else args.max_steps * args.gradient_accumulation_steps
)
self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)
if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0:
self._load_rng_state(resume_from_checkpoint)
rng_to_sync = False
steps_skipped = 0
if skip_first_batches is not None and steps_trained_in_current_epoch > 0:
epoch_iterator = skip_first_batches(epoch_iterator, steps_trained_in_current_epoch)
steps_skipped = steps_trained_in_current_epoch
steps_trained_in_current_epoch = 0
rng_to_sync = True
step = -1
for step, inputs in enumerate(epoch_iterator):
total_batched_samples += 1
if rng_to_sync:
self._load_rng_state(resume_from_checkpoint)
rng_to_sync = False
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
if steps_trained_progress_bar is not None:
steps_trained_progress_bar.update(1)
if steps_trained_in_current_epoch == 0:
self._load_rng_state(resume_from_checkpoint)
continue
elif steps_trained_progress_bar is not None:
steps_trained_progress_bar.close()
steps_trained_progress_bar = None
if step % args.gradient_accumulation_steps == 0:
self.control = self.callback_handler.on_step_begin(args, self.state, self.control)
if (
(total_batched_samples % args.gradient_accumulation_steps != 0)
and args.local_rank != -1
and args._no_sync_in_gradient_accumulation
):
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
tr_loss_step = self.training_step(model, inputs)
else:
tr_loss_step = self.training_step(model, inputs)
if (
args.logging_nan_inf_filter
and not is_torch_tpu_available()
and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
):
# if loss is nan or inf simply add the average of previous logged losses
tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
else:
tr_loss += tr_loss_step
self.current_flos += float(self.floating_point_ops(inputs))
# Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
if self.deepspeed:
self.deepspeed.step()
if total_batched_samples % args.gradient_accumulation_steps == 0 or (
# last step in epoch but step is always smaller than gradient_accumulation_steps
steps_in_epoch <= args.gradient_accumulation_steps
and (step + 1) == steps_in_epoch
):
# Gradient clipping
if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
# deepspeed does its own clipping
if self.do_grad_scaling:
# Reduce gradients first for XLA
if is_torch_tpu_available():
gradients = xm._fetch_gradients(self.optimizer)
xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size())
# AMP: gradients need unscaling
self.scaler.unscale_(self.optimizer)
if is_sagemaker_mp_enabled() and args.fp16:
self.optimizer.clip_master_grads(args.max_grad_norm)
elif hasattr(self.optimizer, "clip_grad_norm"):
# Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
self.optimizer.clip_grad_norm(args.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
# Some models (like FullyShardedDDP) have a specific way to do gradient clipping
model.clip_grad_norm_(args.max_grad_norm)
else:
# Revert to normal clipping otherwise, handling Apex or full precision
nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
args.max_grad_norm,
)
# Optimizer step
optimizer_was_run = True
if self.deepspeed:
pass # called outside the loop
elif is_torch_tpu_available():
if self.do_grad_scaling:
self.scaler.step(self.optimizer)
self.scaler.update()
else:
xm.optimizer_step(self.optimizer)
elif self.do_grad_scaling:
scale_before = self.scaler.get_scale()
self.scaler.step(self.optimizer)
self.scaler.update()
scale_after = self.scaler.get_scale()
optimizer_was_run = scale_before <= scale_after
else:
self.optimizer.step()
if optimizer_was_run and not self.deepspeed:
self.lr_scheduler.step()
model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch
self.control = self.callback_handler.on_step_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
else:
self.control = self.callback_handler.on_substep_end(args, self.state, self.control)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
if step < 0:
logger.warning(
"There seems to be not a single sample in your epoch_iterator, stopping training at step"
f" {self.state.global_step}! This is expected if you're using an IterableDataset and set"
f" num_steps ({max_steps}) higher than the number of available samples."
)
self.control.should_training_stop = True
self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
if is_torch_tpu_available():
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
else:
logger.warning(
"You enabled PyTorch/XLA debug metrics but you don't have a TPU "
"configured. Check your training configuration if this is unexpected."
)
if self.control.should_training_stop:
break
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
# Wait for everyone to get here so we are sure the model has been saved by process 0.
if is_torch_tpu_available():
xm.rendezvous("load_best_model_at_end")
elif args.local_rank != -1:
dist.barrier()
elif is_sagemaker_mp_enabled():
smp.barrier()
self._load_best_model()
# add remaining tr_loss
self._total_loss_scalar += tr_loss.item()
train_loss = self._total_loss_scalar / self.state.global_step
metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
self.store_flos()
metrics["total_flos"] = self.state.total_flos
metrics["train_loss"] = train_loss
self.is_in_train = False
self._memory_tracker.stop_and_update_metrics(metrics)
self.log(metrics)
run_dir = self._get_output_dir(trial)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir)
# Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint and process allowed to save.
if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1:
for checkpoint in checkpoints_sorted:
if checkpoint != self.state.best_model_checkpoint:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint)
self.control = self.callback_handler.on_train_end(args, self.state, self.control)
return TrainOutput(self.state.global_step, train_loss, metrics)
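# Resolve the directory training artifacts are written to; during a hyperparameter search each trial gets its own run subfolder.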
def _get_output_dir(self, trial):
if self.hp_search_backend is not None and trial is not None:
if self.hp_search_backend == HPSearchBackend.OPTUNA:
run_id = trial.number
elif self.hp_search_backend == HPSearchBackend.RAY:
from ray import tune
run_id = tune.get_trial_id()
elif self.hp_search_backend == HPSearchBackend.SIGOPT:
run_id = trial.id
elif self.hp_search_backend == HPSearchBackend.WANDB:
import wandb
run_id = wandb.run.id
run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
run_dir = os.path.join(self.args.output_dir, run_name)
else:
run_dir = self.args.output_dir
return run_dir
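# Restore model weights from a checkpoint folder, handling full, sharded and SageMaker Model Parallel checkpoints.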
def _load_from_checkpoint(self, resume_from_checkpoint, model=None):
if model is None:
model = self.model
if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)) and not os.path.isfile(
os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME)
):
raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")
logger.info(f"Loading model from {resume_from_checkpoint}.")
if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
checkpoint_version = config.transformers_version
if checkpoint_version is not None and checkpoint_version != __version__:
logger.warning(
f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
f"Transformers but your current version is {__version__}. This is not recommended and could "
"yield to errors or unwanted behaviors."
)
if os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
# If the model is on the GPU, it still works!
if is_sagemaker_mp_enabled():
if os.path.isfile(os.path.join(resume_from_checkpoint, "user_content.pt")):
# If the 'user_content.pt' file exists, load with the new smp api.
# Checkpoint must have been saved with the new smp api.
smp.resume_from_checkpoint(
path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False
)
else:
# If the 'user_content.pt' file does NOT exist, load with the old smp api.
# Checkpoint must have been saved with the old smp api.
if hasattr(self.args, "fp16") and self.args.fp16 is True:
logger.warning(
"Enabling FP16 and loading from smp < 1.10 checkpoint together is not suppported."
)
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
# Required for smp to not auto-translate state_dict from hf to smp (is already smp).
state_dict["_smp_is_partial"] = False
load_result = model.load_state_dict(state_dict, strict=True)
# release memory
del state_dict
else:
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
# workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
# which takes *args instead of **kwargs
load_result = model.load_state_dict(state_dict, False)
# release memory
del state_dict
self._issue_warnings_after_load(load_result)
else:
# We load the sharded checkpoint
load_result = load_sharded_checkpoint(model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled())
if not is_sagemaker_mp_enabled():
self._issue_warnings_after_load(load_result)
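# Reload the best checkpoint tracked in TrainerState at the end of training (used with load_best_model_at_end).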
def _load_best_model(self):
logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).")
best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
if os.path.exists(best_model_path):
if self.deepspeed:
if self.model_wrapped is not None:
# this removes the pre-hooks from the previous engine
self.model_wrapped.destroy()
self.model_wrapped = None
# temporary hack until Deepspeed fixes the problem with resuming from an existing engine that has already stepped
deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
self,
num_training_steps=self.args.max_steps,
resume_from_checkpoint=self.state.best_model_checkpoint,
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
self.optimizer = optimizer
self.lr_scheduler = lr_scheduler
else:
if is_sagemaker_mp_enabled():
if os.path.isfile(os.path.join(self.state.best_model_checkpoint, "user_content.pt")):
# If the 'user_content.pt' file exists, load with the new smp api.
# Checkpoint must have been saved with the new smp api.
smp.resume_from_checkpoint(
path=self.state.best_model_checkpoint,
tag=WEIGHTS_NAME,
partial=False,
load_optimizer=False,
)
else:
# If the 'user_content.pt' file does NOT exist, load with the old smp api.
# Checkpoint must have been saved with the old smp api.
state_dict = torch.load(best_model_path, map_location="cpu")
state_dict["_smp_is_partial"] = False
load_result = model.load_state_dict(state_dict, strict=True)
else:
# We load the model state dict on the CPU to avoid an OOM error.
state_dict = torch.load(best_model_path, map_location="cpu")
# If the model is on the GPU, it still works!
# workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
# which takes *args instead of **kwargs
load_result = model.load_state_dict(state_dict, False)
if not is_sagemaker_mp_enabled():
self._issue_warnings_after_load(load_result)
elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)):
load_result = load_sharded_checkpoint(
model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled()
)
if not is_sagemaker_mp_enabled():
self._issue_warnings_after_load(load_result)
else:
logger.warning(
f"Could not locate the best model at {best_model_path}, if you are running a distributed training "
"on multiple nodes, you should activate `--save_on_each_node`."
)
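# Warn about missing/unexpected keys reported by load_state_dict, ignoring keys the model deliberately skips on save.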
def _issue_warnings_after_load(self, load_result):
if len(load_result.missing_keys) != 0:
if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set(
self.model._keys_to_ignore_on_save
):
self.model.tie_weights()
else:
logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
if len(load_result.unexpected_keys) != 0:
logger.warning(
f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}."
)
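# Depending on the callback control flags, log the running loss, run evaluation and/or save a checkpoint.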
def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
if self.control.should_log:
if is_torch_tpu_available():
xm.mark_step()
logs: Dict[str, float] = {}
# all_gather + mean() to get average loss over all processes
tr_loss_scalar = self._nested_gather(tr_loss).mean().item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
logs["learning_rate"] = self._get_learning_rate()
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.store_flos()
self.log(logs)
metrics = None
if self.control.should_evaluate:
if isinstance(self.eval_dataset, dict):
for eval_dataset_name, eval_dataset in self.eval_dataset.items():
metrics = self.evaluate(
eval_dataset=eval_dataset,
ignore_keys=ignore_keys_for_eval,
metric_key_prefix=f"eval_{eval_dataset_name}",
)
else:
metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
self._report_to_hp_search(trial, self.state.global_step, metrics)
if self.control.should_save:
self._save_checkpoint(model, trial, metrics=metrics)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
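# Restore python/numpy/torch (and CUDA/XLA) RNG states saved in the checkpoint so resumed runs stay reproducible.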
def _load_rng_state(self, checkpoint):
# Load RNG states from `checkpoint`
if checkpoint is None:
return
if self.args.world_size > 1:
process_index = self.args.process_index
rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth")
if not os.path.isfile(rng_file):
logger.info(
f"Didn't find an RNG file for process {process_index}, if you are resuming a training that "
"wasn't launched in a distributed fashion, reproducibility is not guaranteed."
)
return
else:
rng_file = os.path.join(checkpoint, "rng_state.pth")
if not os.path.isfile(rng_file):
logger.info(
"Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
"fashion, reproducibility is not guaranteed."
)
return
checkpoint_rng_state = torch.load(rng_file)
random.setstate(checkpoint_rng_state["python"])
np.random.set_state(checkpoint_rng_state["numpy"])
torch.random.set_rng_state(checkpoint_rng_state["cpu"])
if torch.cuda.is_available():
if self.args.local_rank != -1:
torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
else:
try:
torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
except Exception as e:
logger.info(
f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}"
"\nThis won't yield the same results as if the training had not been interrupted."
)
if is_torch_tpu_available():
xm.set_rng_state(checkpoint_rng_state["xla"])
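# Write a full checkpoint: model weights, optimizer/scheduler/scaler states, TrainerState and RNG states.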
def _save_checkpoint(self, model, trial, metrics=None):
# In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
# want to save except FullyShardedDDP.
# assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"
# Save model checkpoint
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
if self.hp_search_backend is None and trial is None:
self.store_flos()
run_dir = self._get_output_dir(trial=trial)
output_dir = os.path.join(run_dir, checkpoint_folder)
self.save_model(output_dir, _internal_call=True)
if self.deepspeed:
# under zero3 the model file itself doesn't get saved since it's bogus, unless the deepspeed
# config `stage3_gather_16bit_weights_on_model_save` is True
self.deepspeed.save_checkpoint(output_dir)
# Save optimizer and scheduler
if self.sharded_ddp == ShardedDDPOption.SIMPLE:
self.optimizer.consolidate_state_dict()
if is_torch_tpu_available():
xm.rendezvous("saving_optimizer_states")
xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
with warnings.catch_warnings(record=True) as caught_warnings:
xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
reissue_pt_warnings(caught_warnings)
elif is_sagemaker_mp_enabled():
opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False)
smp.barrier()
if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state:
smp.save(
opt_state_dict,
os.path.join(output_dir, OPTIMIZER_NAME),
partial=True,
v3=smp.state.cfg.shard_optimizer_state,
)
if self.args.should_save:
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
reissue_pt_warnings(caught_warnings)
if self.do_grad_scaling:
torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
elif self.args.should_save and not self.deepspeed:
# deepspeed.save_checkpoint above saves model/optim/sched
torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
with warnings.catch_warnings(record=True) as caught_warnings:
torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
reissue_pt_warnings(caught_warnings)
if self.do_grad_scaling:
torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
# Determine the new best metric / best model checkpoint
if metrics is not None and self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
metric_value = metrics[metric_to_check]
operator = np.greater if self.args.greater_is_better else np.less
if (
self.state.best_metric is None
or self.state.best_model_checkpoint is None
or operator(metric_value, self.state.best_metric)
):
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
# Save the Trainer state
if self.args.should_save:
self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
# Save RNG state in non-distributed training
rng_states = {
"python": random.getstate(),
"numpy": np.random.get_state(),
"cpu": torch.random.get_rng_state(),
}
if torch.cuda.is_available():
if self.args.local_rank == -1:
# In non distributed, we save the global CUDA RNG state (will take care of DataParallel)
rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
else:
rng_states["cuda"] = torch.cuda.random.get_rng_state()
if is_torch_tpu_available():
rng_states["xla"] = xm.get_rng_state()
# A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
# not yet exist.
os.makedirs(output_dir, exist_ok=True)
if self.args.world_size <= 1:
torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
else:
torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth"))
if self.args.push_to_hub:
self._push_from_checkpoint(output_dir)
# Maybe delete some older checkpoints.
if self.args.should_save:
self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
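# Reload optimizer, LR scheduler and (optionally) grad-scaler states from the checkpoint, unless deepspeed already handled it.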
def _load_optimizer_and_scheduler(self, checkpoint):
if checkpoint is None:
return
if self.deepspeed:
# deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init
return
checkpoint_file_exists = (
glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*")
if is_sagemaker_mp_enabled()
else os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME))
)
if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)):
# Load in optimizer and scheduler states
if is_torch_tpu_available():
# On TPU we have to take some extra precautions to properly load the states on the right device.
optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu")
with warnings.catch_warnings(record=True) as caught_warnings:
lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu")
reissue_pt_warnings(caught_warnings)
xm.send_cpu_data_to_device(optimizer_state, self.args.device)
xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)
self.optimizer.load_state_dict(optimizer_state)
self.lr_scheduler.load_state_dict(lr_scheduler_state)
else:
map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device
if is_sagemaker_mp_enabled():
if os.path.isfile(os.path.join(checkpoint, "user_content.pt")):
# Optimizer checkpoint was saved with smp >= 1.10
def opt_load_hook(mod, opt):
opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
else:
# Optimizer checkpoint was saved with smp < 1.10
def opt_load_hook(mod, opt):
if IS_SAGEMAKER_MP_POST_1_10:
opt.load_state_dict(
smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True)
)
else:
opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True))
self.model_wrapped.register_post_step_hook(opt_load_hook)
else:
self.optimizer.load_state_dict(
torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location)
)
with warnings.catch_warnings(record=True) as caught_warnings:
self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME)))
reissue_pt_warnings(caught_warnings)
if self.do_grad_scaling and os.path.isfile(os.path.join(checkpoint, SCALER_NAME)):
self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, SCALER_NAME)))
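# Run a hyperparameter search with optuna, ray[tune], sigopt or wandb; requires the Trainer to be built with a model_init function.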
def hyperparameter_search(
self,
hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None,
compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,
n_trials: int = 20,
direction: str = "minimize",
backend: Optional[Union["str", HPSearchBackend]] = None,
hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
if backend is None:
backend = default_hp_search_backend()
if backend is None:
raise RuntimeError(
"At least one of optuna or ray should be installed. "
"To install optuna run `pip install optuna`. "
"To install ray run `pip install ray[tune]`. "
"To install sigopt run `pip install sigopt`."
)
backend = HPSearchBackend(backend)
if backend == HPSearchBackend.OPTUNA and not is_optuna_available():
raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.")
if backend == HPSearchBackend.RAY and not is_ray_tune_available():
raise RuntimeError(
"You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`."
)
if backend == HPSearchBackend.SIGOPT and not is_sigopt_available():
raise RuntimeError("You picked the sigopt backend, but it is not installed. Use `pip install sigopt`.")
if backend == HPSearchBackend.WANDB and not is_wandb_available():
raise RuntimeError("You picked the wandb backend, but it is not installed. Use `pip install wandb`.")
self.hp_search_backend = backend
if self.model_init is None:
raise RuntimeError(
"To use hyperparameter search, you need to pass your model through a model_init function."
)
self.hp_space = default_hp_space[backend] if hp_space is None else hp_space
self.hp_name = hp_name
self.compute_objective = default_compute_objective if compute_objective is None else compute_objective
backend_dict = {
HPSearchBackend.OPTUNA: run_hp_search_optuna,
HPSearchBackend.RAY: run_hp_search_ray,
HPSearchBackend.SIGOPT: run_hp_search_sigopt,
HPSearchBackend.WANDB: run_hp_search_wandb,
}
best_run = backend_dict[backend](self, n_trials, direction, **kwargs)
self.hp_search_backend = None
return best_run
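# Append the logs (plus epoch and global step) to the state history and dispatch the on_log callback event.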
def log(self, logs: Dict[str, float]) -> None:
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
output = {**logs, **{"step": self.state.global_step}}
self.state.log_history.append(output)
self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
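# Recursively move inputs (tensors nested in dicts/lists/tuples) to the right device and, for deepspeed, the right dtype.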
def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]:
if isinstance(data, Mapping):
return type(data)({k: self._prepare_input(v) for k, v in data.items()})
elif isinstance(data, (tuple, list)):
return type(data)(self._prepare_input(v) for v in data)
elif isinstance(data, torch.Tensor):
kwargs = {"device": self.args.device}
if self.deepspeed and (torch.is_floating_point(data) or torch.is_complex(data)):
# NLP model inputs are int/uint and get adjusted to the right dtype of the
# embedding. Other models, such as wav2vec2, have float inputs and thus
# may need special handling to match the dtype of the model
kwargs.update({"dtype": self.args.hf_deepspeed_config.dtype()})
return data.to(**kwargs)
return data
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
inputs = self._prepare_input(inputs)
if len(inputs) == 0:
raise ValueError(
"The batch received was empty, your model won't be able to train on it. Double-check that your "
f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}."
)
if self.args.past_index >= 0 and self._past is not None:
inputs["mems"] = self._past
return inputs
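# Context managers wrapping the forward pass: autocast when CUDA/CPU AMP is enabled, a no-op context otherwise.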
def compute_loss_context_manager(self):
return self.autocast_smart_context_manager()
def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True):
if self.use_cuda_amp or self.use_cpu_amp:
if is_torch_greater_or_equal_than_1_10:
ctx_manager = (
torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype)
if self.use_cpu_amp
else torch.cuda.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype)
)
else:
ctx_manager = torch.cuda.amp.autocast()
else:
ctx_manager = contextlib.nullcontext() if sys.version_info >= (3, 7) else contextlib.suppress()
return ctx_manager
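# One optimization micro-step: forward pass, loss scaling for accumulation/AMP/apex/deepspeed, then backward.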
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
model.train()
inputs = self._prepare_inputs(inputs)
if is_sagemaker_mp_enabled():
loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps)
return loss_mb.reduce_mean().detach().to(self.args.device)
with self.compute_loss_context_manager():
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.args.gradient_accumulation_steps > 1 and not self.deepspeed:
# deepspeed handles loss scaling by gradient_accumulation_steps in its `backward`
loss = loss / self.args.gradient_accumulation_steps
if self.do_grad_scaling:
self.scaler.scale(loss).backward()
elif self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
# loss gets scaled under gradient_accumulation_steps in deepspeed
loss = self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
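# Compute the loss, either via the label smoother or from the loss returned by the model itself.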
def compute_loss(self, model, inputs, return_outputs=False):
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values():
loss = self.label_smoother(outputs, labels, shift_labels=True)
else:
loss = self.label_smoother(outputs, labels)
else:
if isinstance(outputs, dict) and "loss" not in outputs:
raise ValueError(
"The model did not return a loss from the inputs, only the following keys: "
f"{','.join(outputs.keys())}. For reference, the inputs it received are {','.join(inputs.keys())}."
)
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
def is_local_process_zero(self) -> bool:
return self.args.local_process_index == 0
def is_world_process_zero(self) -> bool:
# Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global
# process index.
if is_sagemaker_mp_enabled():
return smp.rank() == 0
else:
return self.args.process_index == 0
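# Persist the model (and tokenizer) in save_pretrained-compatible files, dispatching on TPU, SageMaker MP, FSDP/sharded DDP and deepspeed.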
def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False):
if output_dir is None:
output_dir = self.args.output_dir
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif is_sagemaker_mp_enabled():
# Calling the state_dict needs to be done on the wrapped model and on all processes.
os.makedirs(output_dir, exist_ok=True)
state_dict = self.model_wrapped.state_dict()
if self.args.should_save:
self._save(output_dir, state_dict=state_dict)
if IS_SAGEMAKER_MP_POST_1_10:
# 'user_content.pt' indicates model state_dict saved with smp >= 1.10
Path(os.path.join(output_dir, "user_content.pt")).touch()
elif (
ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp
or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp
or self.fsdp is not None
):
state_dict = self.model.state_dict()
if self.args.should_save:
self._save(output_dir, state_dict=state_dict)
elif self.deepspeed:
# this takes care of everything as long as we aren't under zero3
if self.args.should_save:
self._save(output_dir)
if is_deepspeed_zero3_enabled():
# It's too complicated to try to override different places where the weights dump gets
# saved, so since under zero3 the file is bogus, simply delete it. The user should
# either use the deepspeed checkpoint to resume, or recover the full weights with
# zero_to_fp32.py stored in the checkpoint.
if self.args.should_save:
file = os.path.join(output_dir, WEIGHTS_NAME)
if os.path.isfile(file):
# logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights")
os.remove(file)
# now save the real model if stage3_gather_16bit_weights_on_model_save=True
# if false it will not be saved.
# This must be called on all ranks
if not self.deepspeed.save_16bit_model(output_dir, WEIGHTS_NAME):
logger.warning(
"deepspeed.save_16bit_model didn't save the model, since"
" stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use"
" zero_to_fp32.py to recover weights"
)
self.deepspeed.save_checkpoint(output_dir)
elif self.args.should_save:
self._save(output_dir)
# Push to the Hub when `save_model` is called by the user.
if self.args.push_to_hub and not _internal_call:
self.push_to_hub(commit_message="Model save")
def _save_tpu(self, output_dir: Optional[str] = None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info(f"Saving model checkpoint to {output_dir}")
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
xm.rendezvous("saving_checkpoint")
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
unwrap_model(self.model).save_pretrained(
output_dir,
is_main_process=self.args.should_save,
state_dict=self.model.state_dict(),
save_function=xm.save,
)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
state_dict = self.model.state_dict()
xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
self.model.save_pretrained(output_dir, is_main_process=self.args.should_save, save_function=xm.save)
if self.tokenizer is not None and self.args.should_save:
self.tokenizer.save_pretrained(output_dir)
def _save(self, output_dir: Optional[str] = None, state_dict=None):
# If we are executing this function, we are the process zero, so we don't check for that.
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model checkpoint to {output_dir}")
# Save a trained model and configuration using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
if not isinstance(self.model, PreTrainedModel):
if isinstance(unwrap_model(self.model), PreTrainedModel):
if state_dict is None:
state_dict = self.model.state_dict()
unwrap_model(self.model).save_pretrained(output_dir, state_dict=state_dict)
else:
logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.")
if state_dict is None:
state_dict = self.model.state_dict()
torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))
else:
if self.save_prefixencoder:
print("Saving PrefixEncoder")
state_dict = self.model.state_dict()
filtered_state_dict = {}
for k, v in self.model.named_parameters():
if v.requires_grad:
filtered_state_dict[k] = state_dict[k]
self.model.save_pretrained(output_dir, state_dict=filtered_state_dict)
else:
print("Saving the whole model")
self.model.save_pretrained(output_dir, state_dict=state_dict)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
def store_flos(self):
# Storing the number of floating-point operations that went into the model
if self.args.local_rank != -1:
self.state.total_flos += (
distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item()
)
self.current_flos = 0
else:
self.state.total_flos += self.current_flos
self.current_flos = 0
def _sorted_checkpoints(
self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False
) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match is not None and regex_match.groups() is not None:
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
# Make sure we don't delete the best model.
if self.state.best_model_checkpoint is not None:
best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))
for i in range(best_model_index, len(checkpoints_sorted) - 2):
checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i]
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:
if self.args.save_total_limit is None or self.args.save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir)
if len(checkpoints_sorted) <= self.args.save_total_limit:
return
# If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which
# we don't do to allow resuming.
save_total_limit = self.args.save_total_limit
if (
self.state.best_model_checkpoint is not None
and self.args.save_total_limit == 1
and checkpoints_sorted[-1] != self.state.best_model_checkpoint
):
save_total_limit = 2
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
shutil.rmtree(checkpoint, ignore_errors=True)
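# Public evaluation entry point: runs the evaluation loop, adds speed/memory metrics, logs them and fires callbacks.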
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
# memory metrics - must set up as early as possible
self._memory_tracker.start()
eval_dataloader = self.get_eval_dataloader(eval_dataset)
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if self.compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
total_batch_size = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
self.log(output.metrics)
if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return output.metrics
def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
) -> PredictionOutput:
# memory metrics - must set up as early as possible
self._memory_tracker.start()
test_dataloader = self.get_test_dataloader(test_dataset)
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
output = eval_loop(
test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
)
total_batch_size = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics)
self._memory_tracker.stop_and_update_metrics(output.metrics)
return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics)
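# Shared prediction/evaluation loop: batched prediction steps, cross-process gathering, and metric computation.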
def evaluation_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
args = self.args
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
# if eval is called w/o train init deepspeed here
if args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(
self, num_training_steps=0, resume_from_checkpoint=None, inference=True
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
model = self._wrap_model(self.model, training=False, dataloader=dataloader)
# if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
# while ``train`` is running, cast it to the right dtype first and then put on device
if not self.is_in_train:
if args.fp16_full_eval:
model = model.to(dtype=torch.float16, device=args.device)
elif args.bf16_full_eval:
model = model.to(dtype=torch.bfloat16, device=args.device)
batch_size = self.args.eval_batch_size
logger.info(f"***** Running {description} *****")
if has_length(dataloader):
logger.info(f" Num examples = {self.num_examples(dataloader)}")
else:
logger.info(" Num examples: Unknown")
logger.info(f" Batch size = {batch_size}")
model.eval()
self.callback_handler.eval_dataloader = dataloader
# Do this before wrapping.
eval_dataset = getattr(dataloader, "dataset", None)
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)
if args.past_index >= 0:
self._past = None
# Initialize containers
# losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
losses_host = None
preds_host = None
labels_host = None
inputs_host = None
# losses/preds/labels on CPU (final containers)
all_losses = None
all_preds = None
all_labels = None
all_inputs = None
# Will be useful when we have an iterable dataset so we don't know its length.
observed_num_examples = 0
# Main evaluation loop
for step, inputs in enumerate(dataloader):
# Update the observed num examples
observed_batch_size = find_batch_size(inputs)
if observed_batch_size is not None:
observed_num_examples += observed_batch_size
# For batch samplers, batch_size is not known by the dataloader in advance.
if batch_size is None:
batch_size = observed_batch_size
# Prediction step
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None
if is_torch_tpu_available():
xm.mark_step()
# Update containers on host
if loss is not None:
losses = self._nested_gather(loss.repeat(batch_size))
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if labels is not None:
labels = self._pad_across_processes(labels)
labels = self._nested_gather(labels)
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
if inputs_decode is not None:
inputs_decode = self._pad_across_processes(inputs_decode)
inputs_decode = self._nested_gather(inputs_decode)
inputs_host = (
inputs_decode
if inputs_host is None
else nested_concat(inputs_host, inputs_decode, padding_index=-100)
)
if logits is not None:
logits = self._pad_across_processes(logits)
logits = self._nested_gather(logits)
if self.preprocess_logits_for_metrics is not None:
logits = self.preprocess_logits_for_metrics(logits, labels)
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if inputs_host is not None:
inputs_decode = nested_numpify(inputs_host)
all_inputs = (
inputs_decode
if all_inputs is None
else nested_concat(all_inputs, inputs_decode, padding_index=-100)
)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = (
labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
)
# Set back to None to begin a new accumulation
losses_host, preds_host, inputs_host, labels_host = None, None, None, None
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if inputs_host is not None:
inputs_decode = nested_numpify(inputs_host)
all_inputs = (
inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100)
)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
# Number of samples
if has_length(eval_dataset):
num_samples = len(eval_dataset)
# The instance check is weird and does not actually check for the type, but whether the dataset has the right
# methods. Therefore we need to make sure it also has the attribute.
elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0:
num_samples = eval_dataset.num_examples
else:
if has_length(dataloader):
num_samples = self.num_examples(dataloader)
else: # both len(dataloader.dataset) and len(dataloader) fail
num_samples = observed_num_examples
if num_samples == 0 and observed_num_examples > 0:
num_samples = observed_num_examples
# The number of losses has been rounded to a multiple of batch_size, and in distributed training the number of
# samples has been rounded to a multiple of batch_size as well, so we truncate.
if all_losses is not None:
all_losses = all_losses[:num_samples]
if all_preds is not None:
all_preds = nested_truncate(all_preds, num_samples)
if all_labels is not None:
all_labels = nested_truncate(all_labels, num_samples)
if all_inputs is not None:
all_inputs = nested_truncate(all_inputs, num_samples)
# Metrics!
if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
if args.include_inputs_for_metrics:
metrics = self.compute_metrics(
EvalPrediction(predictions=all_preds, label_ids=all_labels, inputs=all_inputs)
)
else:
metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if all_losses is not None:
metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
if hasattr(self, "jit_compilation_time"):
metrics[f"{metric_key_prefix}_jit_compilation_time"] = self.jit_compilation_time
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
def _nested_gather(self, tensors, name=None):
if tensors is None:
return
if is_torch_tpu_available():
if name is None:
name = "nested_gather"
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return tensors
# Copied from Accelerate.
def _pad_across_processes(self, tensor, pad_index=-100):
if isinstance(tensor, (list, tuple)):
return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
elif isinstance(tensor, dict):
return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
elif not isinstance(tensor, torch.Tensor):
raise TypeError(
f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
)
if len(tensor.shape) < 2:
return tensor
# Gather all sizes
size = torch.tensor(tensor.shape, device=tensor.device)[None]
sizes = self._nested_gather(size).cpu()
max_size = max(s[1] for s in sizes)
# When extracting XLA graphs for compilation, max_size is 0,
# so use inequality to avoid errors.
if tensor.shape[1] >= max_size:
return tensor
# Then pad to the maximum size
old_size = tensor.shape
new_size = list(old_size)
new_size[1] = max_size
new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
new_tensor[:, : old_size[1]] = tensor
return new_tensor
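# Single prediction step: forward pass without gradients, returning (loss, logits, labels) as requested.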
def prediction_step(
self,
model: nn.Module,
inputs: Dict[str, Union[torch.Tensor, Any]],
prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names)
# For CLIP-like models capable of returning loss values.
# If `return_loss` is not specified or is `None` in `inputs`, we check whether the default value of `return_loss`
# is `True` in `model.forward`.
return_loss = inputs.get("return_loss", None)
if return_loss is None:
return_loss = self.can_return_loss
loss_without_labels = True if len(self.label_names) == 0 and return_loss else False
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels or loss_without_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if is_sagemaker_mp_enabled():
raw_outputs = smp_forward_only(model, inputs)
if has_labels or loss_without_labels:
if isinstance(raw_outputs, dict):
loss_mb = raw_outputs["loss"]
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
else:
loss_mb = raw_outputs[0]
logits_mb = raw_outputs[1:]
loss = loss_mb.reduce_mean().detach().cpu()
logits = smp_nested_concat(logits_mb)
else:
loss = None
if isinstance(raw_outputs, dict):
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
else:
logits_mb = raw_outputs
logits = smp_nested_concat(logits_mb)
else:
if has_labels or loss_without_labels:
with self.compute_loss_context_manager():
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
with self.compute_loss_context_manager():
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return (loss, None, None)
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
def init_git_repo(self, at_init: bool = False):
if not self.is_world_process_zero():
return
if self.args.hub_model_id is None:
repo_name = Path(self.args.output_dir).absolute().name
else:
repo_name = self.args.hub_model_id
if "/" not in repo_name:
repo_name = get_full_repo_name(repo_name, token=self.args.hub_token)
# Make sure the repo exists.
create_repo(repo_name, token=self.args.hub_token, private=self.args.hub_private_repo, exist_ok=True)
try:
self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token)
except EnvironmentError:
if self.args.overwrite_output_dir and at_init:
# Try again after wiping output_dir
shutil.rmtree(self.args.output_dir)
self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token)
else:
raise
self.repo.git_pull()
# By default, ignore the checkpoint folders
if (
not os.path.exists(os.path.join(self.args.output_dir, ".gitignore"))
and self.args.hub_strategy != HubStrategy.ALL_CHECKPOINTS
):
with open(os.path.join(self.args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer:
writer.writelines(["checkpoint-*/"])
# Add "*.sagemaker" to .gitignore if using SageMaker
if os.environ.get("SM_TRAINING_ENV"):
self._add_sm_patterns_to_gitignore()
self.push_in_progress = None
def create_model_card(
self,
language: Optional[str] = None,
license: Optional[str] = None,
tags: Union[str, List[str], None] = None,
model_name: Optional[str] = None,
finetuned_from: Optional[str] = None,
tasks: Union[str, List[str], None] = None,
dataset_tags: Union[str, List[str], None] = None,
dataset: Union[str, List[str], None] = None,
dataset_args: Union[str, List[str], None] = None,
):
if not self.is_world_process_zero():
return
training_summary = TrainingSummary.from_trainer(
self,
language=language,
license=license,
tags=tags,
model_name=model_name,
finetuned_from=finetuned_from,
tasks=tasks,
dataset_tags=dataset_tags,
dataset=dataset,
dataset_args=dataset_args,
)
model_card = training_summary.to_model_card()
with open(os.path.join(self.args.output_dir, "README.md"), "w") as f:
f.write(model_card)
def _push_from_checkpoint(self, checkpoint_folder):
# Only push from one node.
if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END:
return
# If we haven't finished the last push, we don't do this one.
if self.push_in_progress is not None and not self.push_in_progress.is_done:
return
output_dir = self.args.output_dir
# To avoid a new synchronization of all model weights, we just copy the file from the checkpoint folder
modeling_files = [CONFIG_NAME, WEIGHTS_NAME]
for modeling_file in modeling_files:
if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)):
shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file))
# Saving the tokenizer is fast and we don't know how many files it may have spawned, so we resave it to be sure.
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
# Same for the training arguments
torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))
try:
if self.args.hub_strategy == HubStrategy.CHECKPOINT:
# Temporarily move the checkpoint just saved for the push
tmp_checkpoint = os.path.join(output_dir, "last-checkpoint")
# We have to remove the "last-checkpoint" dir if it exists, otherwise the checkpoint is moved as a
# subfolder.
if os.path.isdir(tmp_checkpoint):
shutil.rmtree(tmp_checkpoint)
shutil.move(checkpoint_folder, tmp_checkpoint)
if self.args.save_strategy == IntervalStrategy.STEPS:
commit_message = f"Training in progress, step {self.state.global_step}"
else:
commit_message = f"Training in progress, epoch {int(self.state.epoch)}"
_, self.push_in_progress = self.repo.push_to_hub(
commit_message=commit_message, blocking=False, auto_lfs_prune=True
)
finally:
if self.args.hub_strategy == HubStrategy.CHECKPOINT:
# Move back the checkpoint to its place
shutil.move(tmp_checkpoint, checkpoint_folder)
def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str:
# If a user calls manually `push_to_hub` with `self.args.push_to_hub = False`, we try to create the repo but
# it might fail.
if not hasattr(self, "repo"):
self.init_git_repo()
model_name = kwargs.pop("model_name", None)
if model_name is None and self.args.should_save:
if self.args.hub_model_id is None:
model_name = Path(self.args.output_dir).name
else:
model_name = self.args.hub_model_id.split("/")[-1]
# Needs to be executed on all processes for TPU training, but will only save on the process determined by
# self.args.should_save.
self.save_model(_internal_call=True)
# Only push from one node.
if not self.is_world_process_zero():
return
# Cancel any async push in progress if blocking=True. The commits will all be pushed together.
if blocking and self.push_in_progress is not None and not self.push_in_progress.is_done:
self.push_in_progress._process.kill()
self.push_in_progress = None
git_head_commit_url = self.repo.push_to_hub(
commit_message=commit_message, blocking=blocking, auto_lfs_prune=True
)
# push the model card separately so that it is independent from the rest of the model
if self.args.should_save:
self.create_model_card(model_name=model_name, **kwargs)
try:
self.repo.push_to_hub(
commit_message="update model card README.md", blocking=blocking, auto_lfs_prune=True
)
except EnvironmentError as exc:
logger.error(f"Error pushing update to the model card. Please read logs and retry.\n${exc}")
return git_head_commit_url
#
# Deprecated code
#
def prediction_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
args = self.args
if not has_length(dataloader):
raise ValueError("dataloader must implement a working __len__")
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
# if eval is called w/o train init deepspeed here
if args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
# XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
# for example the Z3-optimizer is a must for zero3 to work even for inference - what we
# don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
deepspeed_engine.optimizer.optimizer = None
deepspeed_engine.lr_scheduler = None
model = self._wrap_model(self.model, training=False, dataloader=dataloader)
# if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
# while ``train`` is running, cast it to the right dtype first and then put on device
if not self.is_in_train:
if args.fp16_full_eval:
model = model.to(dtype=torch.float16, device=args.device)
elif args.bf16_full_eval:
model = model.to(dtype=torch.bfloat16, device=args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info(f"***** Running {description} *****")
logger.info(f" Num examples = {num_examples}")
logger.info(f" Batch size = {batch_size}")
losses_host: torch.Tensor = None
preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
inputs_host: Union[torch.Tensor, List[torch.Tensor]] = None
world_size = max(1, args.world_size)
eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
if not prediction_loss_only:
# The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass
# a batch size to the sampler)
make_multiple_of = None
if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
make_multiple_of = dataloader.sampler.batch_size
preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
model.eval()
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)
if args.past_index >= 0:
self._past = None
self.callback_handler.eval_dataloader = dataloader
for step, inputs in enumerate(dataloader):
loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None
if loss is not None:
losses = loss.repeat(batch_size)
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
if inputs_decode is not None:
inputs_host = (
inputs_decode
if inputs_host is None
else nested_concat(inputs_host, inputs_decode, padding_index=-100)
)
self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids"))
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host, inputs_host = None, None, None, None
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
if not prediction_loss_only:
preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids"))
eval_loss = eval_losses_gatherer.finalize()
preds = preds_gatherer.finalize() if not prediction_loss_only else None
label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None
if self.compute_metrics is not None and preds is not None and label_ids is not None:
if args.include_inputs_for_metrics:
metrics = self.compute_metrics(
EvalPrediction(predictions=preds, label_ids=label_ids, inputs=inputs_ids)
)
else:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if eval_loss is not None:
metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples)
def _gather_and_numpify(self, tensors, name):
if tensors is None:
return
if is_torch_tpu_available():
tensors = nested_xla_mesh_reduce(tensors, name)
elif is_sagemaker_mp_enabled():
tensors = smp_gather(tensors)
elif self.args.local_rank != -1:
tensors = distributed_concat(tensors)
return nested_numpify(tensors)
def _add_sm_patterns_to_gitignore(self) -> None:
# Make sure we only do this on the main process
if not self.is_world_process_zero():
return
patterns = ["*.sagemaker-uploading", "*.sagemaker-uploaded"]
# Get current .gitignore content
if os.path.exists(os.path.join(self.repo.local_dir, ".gitignore")):
with open(os.path.join(self.repo.local_dir, ".gitignore"), "r") as f:
current_content = f.read()
else:
current_content = ""
# Add the patterns to .gitignore
content = current_content
for pattern in patterns:
if pattern not in content:
if content.endswith("\n"):
content += pattern
else:
content += f"\n{pattern}"
# Write the .gitignore file if it has changed
if content != current_content:
with open(os.path.join(self.repo.local_dir, ".gitignore"), "w") as f:
logger.debug(f"Writing .gitignore file. Content: {content}")
f.write(content)
self.repo.git_add(".gitignore")
# avoid race condition with git status
time.sleep(0.5)
if not self.repo.is_repo_clean():
self.repo.git_commit("Add *.sagemaker patterns to .gitignore.")
self.repo.git_push() | --- +++ @@ -12,6 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+"""
+The Trainer class, to easily train a 🤗 Transformers model from scratch or finetune it on a new task.
+"""
import contextlib
import functools
@@ -217,6 +220,87 @@
class Trainer:
+ """
+ Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.
+
+ Args:
+ model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*):
+ The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed.
+
+ <Tip>
+
+ [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use
+ your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers
+ models.
+
+ </Tip>
+
+ args ([`TrainingArguments`], *optional*):
+ The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the
+ `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided.
+ data_collator (`DataCollator`, *optional*):
+ The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will
+ default to [`default_data_collator`] if no `tokenizer` is provided, an instance of
+ [`DataCollatorWithPadding`] otherwise.
+ train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*):
+ The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the
+ `model.forward()` method are automatically removed.
+
+ Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a
+ distributed fashion, your iterable dataset should either use an internal attribute `generator` that is a
+ `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will
+ manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally
+ sets the seed of the RNGs used.
+ eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]), *optional*):
+ The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the
+ `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each
+ dataset prepending the dictionary key to the metric name.
+ tokenizer ([`PreTrainedTokenizerBase`], *optional*):
+ The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the
+ maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an
+ interrupted training or reuse the fine-tuned model.
+ model_init (`Callable[[], PreTrainedModel]`, *optional*):
+ A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start
+ from a new instance of the model as given by this function.
+
+ The function may take zero arguments, or a single one containing the optuna/Ray Tune/SigOpt trial object, to
+ be able to choose different architectures according to hyperparameters (such as layer count, sizes of
+ inner layers, dropout probabilities, etc.).
+ compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*):
+ The function that will be used to compute metrics at evaluation. Must take a [`EvalPrediction`] and return
+ a dictionary string to metric values.
+ callbacks (List of [`TrainerCallback`], *optional*):
+ A list of callbacks to customize the training loop. Will add those to the list of default callbacks
+ detailed in [here](callback).
+
+ If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.
+ optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*): A tuple
+ containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model
+ and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
+ preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*):
+ A function that preprocesses the logits right before caching them at each evaluation step. Must take two
+ tensors, the logits and the labels, and return the logits once processed as desired. The modifications made
+ by this function will be reflected in the predictions received by `compute_metrics`.
+
+ Note that the labels (second parameter) will be `None` if the dataset does not have them.
+
+ Important attributes:
+
+ - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`]
+ subclass.
+ - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the
+ original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`,
+ the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner
+ model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`.
+ - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from
+ data parallelism, this means some of the model layers are split on different GPUs).
+ - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set
+ to `False` if model parallel or deepspeed is used, or if the default
+ `TrainingArguments.place_model_on_device` is overridden to return `False`.
+ - **is_in_train** -- Whether or not a model is currently running `train` (e.g. when `evaluate` is called while
+ in `train`)
+
+ """
from transformers.trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state
@@ -593,12 +677,41 @@ raise RuntimeError("Using torch.compile requires PyTorch 2.0 or higher.")
def add_callback(self, callback):
+ """
+ Add a callback to the current list of [`~transformers.TrainerCallback`].
+
+ Args:
+ callback (`type` or [`~transformers.TrainerCallback`]):
+ A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
+ first case, will instantiate a member of that class.
+ """
self.callback_handler.add_callback(callback)
def pop_callback(self, callback):
+ """
+ Remove a callback from the current list of [`~transformers.TrainerCallback`] and return it.
+
+ If the callback is not found, returns `None` (and no error is raised).
+
+ Args:
+ callback (`type` or [`~transformers.TrainerCallback`]):
+ A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
+ first case, will pop the first member of that class found in the list of callbacks.
+
+ Returns:
+ [`~transformer.TrainerCallback`]: The callback removed, if found.
+ """
return self.callback_handler.pop_callback(callback)
def remove_callback(self, callback):
+ """
+ Remove a callback from the current list of [`~transformers.TrainerCallback`].
+
+ Args:
+ callback (`type` or [`~transformers.TrainerCallback`]):
+ A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
+ first case, will remove the first member of that class found in the list of callbacks.
+ """
self.callback_handler.remove_callback(callback)
def _move_model_to_device(self, model, device):
@@ -644,6 +757,7 @@ def _get_collator_with_removed_columns(
self, data_collator: Callable, description: Optional[str] = None
) -> Callable:
+ """Wrap the data collator in a callable removing unused columns."""
if not self.args.remove_unused_columns:
return data_collator
self._set_signature_columns_if_needed()
@@ -730,6 +844,14 @@ )
def get_train_dataloader(self) -> DataLoader:
+ """
+ Returns the training [`~torch.utils.data.DataLoader`].
+
+ Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
+ training if necessary) otherwise.
+
+ Subclass and override this method if you want to inject some custom behavior.
+ """
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
@@ -801,6 +923,16 @@ )
def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
+ """
+ Returns the evaluation [`~torch.utils.data.DataLoader`].
+
+ Subclass and override this method if you want to inject some custom behavior.
+
+ Args:
+ eval_dataset (`torch.utils.data.Dataset`, *optional*):
+ If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
+ by the `model.forward()` method are automatically removed. It must implement `__len__`.
+ """
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
@@ -841,6 +973,16 @@ )
def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
+ """
+ Returns the test [`~torch.utils.data.DataLoader`].
+
+ Subclass and override this method if you want to inject some custom behavior.
+
+ Args:
+ test_dataset (`torch.utils.data.Dataset`, *optional*):
+ The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
+ `model.forward()` method are automatically removed. It must implement `__len__`.
+ """
data_collator = self.data_collator
if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
@@ -879,6 +1021,13 @@ )
def create_optimizer_and_scheduler(self, num_training_steps: int):
+ """
+ Setup the optimizer and the learning rate scheduler.
+
+ We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
+ Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or
+ `create_scheduler`) in a subclass.
+ """
self.create_optimizer()
if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16:
# If smp >= 1.10 and fp16 is enabled, we unwrap the optimizer
@@ -888,6 +1037,12 @@ self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
def create_optimizer(self):
+ """
+ Setup the optimizer.
+
+ We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
+ Trainer's init through `optimizers`, or subclass and override this method in a subclass.
+ """
opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
if self.optimizer is None:
@@ -939,6 +1094,14 @@
@staticmethod
def get_optimizer_cls_and_kwargs(args: TrainingArguments) -> Tuple[Any, Any]:
+ """
+ Returns the optimizer class and optimizer parameters based on the training arguments.
+
+ Args:
+ args (`transformers.training_args.TrainingArguments`):
+ The training arguments for the training session.
+
+ """
# parse args.optim_args
optim_args = {}
@@ -1021,6 +1184,13 @@ return optimizer_cls, optimizer_kwargs
def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
+ """
+ Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
+ passed as an argument.
+
+ Args:
+ num_training_steps (int): The number of training steps to do.
+ """
if self.lr_scheduler is None:
self.lr_scheduler = get_scheduler(
self.args.lr_scheduler_type,
@@ -1031,6 +1201,10 @@ return self.lr_scheduler
def num_examples(self, dataloader: DataLoader) -> int:
+ """
+ Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
+ dataloader.dataset does not exist or has no length, estimates as best it can
+ """
try:
dataset = dataloader.dataset
# Special case for IterableDatasetShard, we need to dig deeper
@@ -1041,6 +1215,7 @@ return len(dataloader) * self.args.per_device_train_batch_size
def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
+ """HP search setup code"""
self._trial = trial
if self.hp_search_backend is None or trial is None:
@@ -1385,6 +1560,22 @@ ignore_keys_for_eval: Optional[List[str]] = None,
**kwargs,
):
+ """
+ Main training entry point.
+
+ Args:
+ resume_from_checkpoint (`str` or `bool`, *optional*):
+ If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
+ `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance
+ of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.
+ trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
+ The trial run or the hyperparameter dictionary for hyperparameter search.
+ ignore_keys_for_eval (`List[str]`, *optional*)
+ A list of keys in the output of your model (if it is a dictionary) that should be ignored when
+ gathering predictions for evaluation during the training.
+ kwargs:
+ Additional keyword arguments used to hide deprecated arguments
+ """
if resume_from_checkpoint is False:
resume_from_checkpoint = None
@@ -2199,6 +2390,7 @@ self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
def _load_optimizer_and_scheduler(self, checkpoint):
+ """If optimizer and scheduler states exist, load them."""
if checkpoint is None:
return
@@ -2264,6 +2456,51 @@ hp_name: Optional[Callable[["optuna.Trial"], str]] = None,
**kwargs,
) -> BestRun:
+ """
+ Launch a hyperparameter search using `optuna` or `Ray Tune` or `SigOpt`. The optimized quantity is determined
+ by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,
+ the sum of all metrics otherwise.
+
+ <Tip warning={true}>
+
+ To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to
+ reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to
+ subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom
+ optimizer/scheduler.
+
+ </Tip>
+
+ Args:
+ hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*):
+ A function that defines the hyperparameter search space. Will default to
+ [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or
+ [`~trainer_utils.default_hp_space_sigopt`] depending on your backend.
+ compute_objective (`Callable[[Dict[str, float]], float]`, *optional*):
+ A function computing the objective to minimize or maximize from the metrics returned by the `evaluate`
+ method. Will default to [`~trainer_utils.default_compute_objective`].
+ n_trials (`int`, *optional*, defaults to 100):
+ The number of trial runs to test.
+ direction (`str`, *optional*, defaults to `"minimize"`):
+ Whether to optimize for a greater or lower objective. Can be `"minimize"` or `"maximize"`; you should pick
+ `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics.
+ backend (`str` or [`~training_utils.HPSearchBackend`], *optional*):
+ The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending
+ on which one is installed. If all are installed, will default to optuna.
+ hp_name (`Callable[["optuna.Trial"], str]`, *optional*):
+ A function that defines the trial/run name. Will default to None.
+ kwargs (`Dict[str, Any]`, *optional*):
+ Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more
+ information see:
+
+ - the documentation of
+ [optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html)
+ - the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run)
+ - the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create)
+
+ Returns:
+ [`trainer_utils.BestRun`]: All the information about the best run. Experiment summary can be found in
+ `run_summary` attribute for Ray backend.
+ """
if backend is None:
backend = default_hp_search_backend()
if backend is None:
@@ -2306,6 +2543,15 @@ return best_run
def log(self, logs: Dict[str, float]) -> None:
+ """
+ Log `logs` on the various objects watching training.
+
+ Subclass and override this method to inject custom behavior.
+
+ Args:
+ logs (`Dict[str, float]`):
+ The values to log.
+ """
if self.state.epoch is not None:
logs["epoch"] = round(self.state.epoch, 2)
@@ -2314,6 +2560,9 @@ self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)
def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]:
+ """
+ Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors.
+ """
if isinstance(data, Mapping):
return type(data)({k: self._prepare_input(v) for k, v in data.items()})
elif isinstance(data, (tuple, list)):
@@ -2329,6 +2578,10 @@ return data
def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
+ """
+ Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and
+ handling potential state.
+ """
inputs = self._prepare_input(inputs)
if len(inputs) == 0:
raise ValueError(
@@ -2341,9 +2594,16 @@ return inputs
def compute_loss_context_manager(self):
+ """
+ A helper wrapper to group together context managers.
+ """
return self.autocast_smart_context_manager()
def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True):
+ """
+ A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired
+ arguments, depending on the situation.
+ """
if self.use_cuda_amp or self.use_cpu_amp:
if is_torch_greater_or_equal_than_1_10:
ctx_manager = (
@@ -2359,6 +2619,23 @@ return ctx_manager
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
+ """
+ Perform a training step on a batch of inputs.
+
+ Subclass and override to inject custom behavior.
+
+ Args:
+ model (`nn.Module`):
+ The model to train.
+ inputs (`Dict[str, Union[torch.Tensor, Any]]`):
+ The inputs and targets of the model.
+
+ The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
+ argument `labels`. Check your model's documentation for all accepted arguments.
+
+ Return:
+ `torch.Tensor`: The tensor with training loss on this batch.
+ """
model.train()
inputs = self._prepare_inputs(inputs)
@@ -2390,6 +2667,11 @@ return loss.detach()
def compute_loss(self, model, inputs, return_outputs=False):
+ """
+ How the loss is computed by Trainer. By default, all models return the loss in the first element.
+
+ Subclass and override for custom behavior.
+ """
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
@@ -2417,9 +2699,17 @@ return (loss, outputs) if return_outputs else loss
def is_local_process_zero(self) -> bool:
+ """
+ Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several
+ machines) main process.
+ """
return self.args.local_process_index == 0
def is_world_process_zero(self) -> bool:
+ """
+ Whether or not this process is the global main process (when training in a distributed fashion on several
+ machines, this is only going to be `True` for one process).
+ """
# Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global
# process index.
if is_sagemaker_mp_enabled():
@@ -2428,6 +2718,11 @@ return self.args.process_index == 0
def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False):
+ """
+ Will save the model, so you can reload it using `from_pretrained()`.
+
+ Will only save from the main process.
+ """
if output_dir is None:
output_dir = self.args.output_dir
@@ -2615,6 +2910,30 @@ ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> Dict[str, float]:
+ """
+ Run evaluation and returns metrics.
+
+ The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
+ (pass it to the init `compute_metrics` argument).
+
+ You can also subclass and override this method to inject custom behavior.
+
+ Args:
+ eval_dataset (`Dataset`, *optional*):
+ Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns
+ not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__`
+ method.
+ ignore_keys (`List[str]`, *optional*):
+ A list of keys in the output of your model (if it is a dictionary) that should be ignored when
+ gathering predictions.
+ metric_key_prefix (`str`, *optional*, defaults to `"eval"`):
+ An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
+ "eval_bleu" if the prefix is "eval" (default)
+
+ Returns:
+ A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The
+ dictionary also contains the epoch number which comes from the training state.
+ """
# memory metrics - must set up as early as possible
self._memory_tracker.start()
@@ -2659,6 +2978,38 @@ def predict(
self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test"
) -> PredictionOutput:
+ """
+ Run prediction and returns predictions and potential metrics.
+
+ Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method
+ will also return metrics, like in `evaluate()`.
+
+ Args:
+ test_dataset (`Dataset`):
+ Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the
+ `model.forward()` method are automatically removed. Has to implement the method `__len__`
+ ignore_keys (`List[str]`, *optional*):
+ A list of keys in the output of your model (if it is a dictionary) that should be ignored when
+ gathering predictions.
+ metric_key_prefix (`str`, *optional*, defaults to `"test"`):
+ An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named
+ "test_bleu" if the prefix is "test" (default)
+
+ <Tip>
+
+ If your predictions or labels have different sequence lengths (for instance because you're doing dynamic padding
+ in a token classification task) the predictions will be padded (on the right) to allow for concatenation into
+ one array. The padding index is -100.
+
+ </Tip>
+
+ Returns: *NamedTuple* A namedtuple with the following keys:
+
+ - predictions (`np.ndarray`): The predictions on `test_dataset`.
+ - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some).
+ - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained
+ labels).
+ """
# memory metrics - must set up as early as possible
self._memory_tracker.start()
@@ -2694,6 +3045,11 @@ ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
+ """
+ Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
+
+ Works both with or without labels.
+ """
args = self.args
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
@@ -2893,6 +3249,10 @@ return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
def _nested_gather(self, tensors, name=None):
+ """
+ Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
+ concatenating them to `gathered`
+ """
if tensors is None:
return
if is_torch_tpu_available():
@@ -2907,6 +3267,10 @@
# Copied from Accelerate.
def _pad_across_processes(self, tensor, pad_index=-100):
+ """
+ Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
+ they can safely be gathered.
+ """
if isinstance(tensor, (list, tuple)):
return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
elif isinstance(tensor, dict):
@@ -2943,6 +3307,29 @@ prediction_loss_only: bool,
ignore_keys: Optional[List[str]] = None,
) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]:
+ """
+ Perform an evaluation step on `model` using `inputs`.
+
+ Subclass and override to inject custom behavior.
+
+ Args:
+ model (`nn.Module`):
+ The model to evaluate.
+ inputs (`Dict[str, Union[torch.Tensor, Any]]`):
+ The inputs and targets of the model.
+
+ The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
+ argument `labels`. Check your model's documentation for all accepted arguments.
+ prediction_loss_only (`bool`):
+ Whether or not to return the loss only.
+ ignore_keys (`List[str]`, *optional*):
+ A list of keys in the output of your model (if it is a dictionary) that should be ignored when
+ gathering predictions.
+
+ Return:
+ Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss,
+ logits and labels (each being optional).
+ """
has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names)
# For CLIP-like models capable of returning loss values.
# If `return_loss` is not specified or being `None` in `inputs`, we check if the default value of `return_loss`
@@ -3019,12 +3406,33 @@ return (loss, logits, labels)
def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):
+ """
+ For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point
+ operations for every backward + forward pass. If using another model, either implement such a method in the
+ model or subclass and override this method.
+
+ Args:
+ inputs (`Dict[str, Union[torch.Tensor, Any]]`):
+ The inputs and targets of the model.
+
+ Returns:
+ `int`: The number of floating-point operations.
+ """
if hasattr(self.model, "floating_point_ops"):
return self.model.floating_point_ops(inputs)
else:
return 0
def init_git_repo(self, at_init: bool = False):
+ """
+ Initializes a git repo in `self.args.hub_model_id`.
+
+ Args:
+ at_init (`bool`, *optional*, defaults to `False`):
+ Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is
+ `True` and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped
+ out.
+ """
if not self.is_world_process_zero():
return
if self.args.hub_model_id is None:
@@ -3074,6 +3482,31 @@ dataset: Union[str, List[str], None] = None,
dataset_args: Union[str, List[str], None] = None,
):
+ """
+ Creates a draft of a model card using the information available to the `Trainer`.
+
+ Args:
+ language (`str`, *optional*):
+ The language of the model (if applicable)
+ license (`str`, *optional*):
+ The license of the model. Will default to the license of the pretrained model used, if the original
+ model given to the `Trainer` comes from a repo on the Hub.
+ tags (`str` or `List[str]`, *optional*):
+ Some tags to be included in the metadata of the model card.
+ model_name (`str`, *optional*):
+ The name of the model.
+ finetuned_from (`str`, *optional*):
+ The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo
+ of the original model given to the `Trainer` (if it comes from the Hub).
+ tasks (`str` or `List[str]`, *optional*):
+ One or several task identifiers, to be included in the metadata of the model card.
+ dataset_tags (`str` or `List[str]`, *optional*):
+ One or several dataset tags, to be included in the metadata of the model card.
+ dataset (`str` or `List[str]`, *optional*):
+ One or several dataset identifiers, to be included in the metadata of the model card.
+ dataset_args (`str` or `List[str]`, *optional*):
+ One or several dataset arguments, to be included in the metadata of the model card.
+ """
if not self.is_world_process_zero():
return
@@ -3136,6 +3569,21 @@ shutil.move(tmp_checkpoint, checkpoint_folder)
def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str:
+ """
+ Upload *self.model* and *self.tokenizer* to the 🤗 model hub on the repo *self.args.hub_model_id*.
+
+ Parameters:
+ commit_message (`str`, *optional*, defaults to `"End of training"`):
+ Message to commit while pushing.
+ blocking (`bool`, *optional*, defaults to `True`):
+ Whether the function should return only when the `git push` has finished.
+ kwargs:
+ Additional keyword arguments passed along to [`~Trainer.create_model_card`].
+
+ Returns:
+ The url of the commit of your model in the given repository if `blocking=True`, a tuple with the url of
+ the commit and an object to track the progress of the commit if `blocking=False`.
+ """
# If a user calls manually `push_to_hub` with `self.args.push_to_hub = False`, we try to create the repo but
# it might fail.
if not hasattr(self, "repo"):
@@ -3188,6 +3636,11 @@ ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
+ """
+ Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
+
+ Works both with or without labels.
+ """
args = self.args
if not has_length(dataloader):
@@ -3322,6 +3775,10 @@ return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples)
def _gather_and_numpify(self, tensors, name):
+ """
+ Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before
+ concatenating them to `gathered`
+ """
if tensors is None:
return
if is_torch_tpu_available():
@@ -3334,6 +3791,7 @@ return nested_numpify(tensors)
def _add_sm_patterns_to_gitignore(self) -> None:
+ """Add SageMaker Checkpointing patterns to .gitignore file."""
# Make sure we only do this on the main process
if not self.is_world_process_zero():
return
@@ -3369,4 +3827,4 @@
if not self.repo.is_repo_clean():
self.repo.git_commit("Add *.sagemaker patterns to .gitignore.")
- self.repo.git_push()+ self.repo.git_push()
| https://raw.githubusercontent.com/zai-org/ChatGLM-6B/HEAD/ptuning/trainer.py |
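The docstrings added in the row above describe the Trainer constructor arguments (model, args, datasets, compute_metrics) and its train/evaluate entry points. As an illustrative sketch only — not part of the dataset row — this is one way the documented pieces might fit together; the model, datasets, and metric function are placeholders supplied by the caller:

# Hypothetical usage sketch of the Trainer API documented above (placeholder objects).
from transformers import Trainer, TrainingArguments

def build_trainer(my_model, train_ds, eval_ds, my_metrics):
    # TrainingArguments collects output_dir, batch sizes, evaluation cadence, hub options, etc.
    args = TrainingArguments(
        output_dir="out",
        num_train_epochs=1,
        per_device_train_batch_size=8,
        evaluation_strategy="epoch",
    )
    # compute_metrics receives an EvalPrediction and returns a dict of named metric values.
    return Trainer(
        model=my_model,
        args=args,
        train_dataset=train_ds,
        eval_dataset=eval_ds,
        compute_metrics=my_metrics,
    )

# trainer = build_trainer(model, train_dataset, eval_dataset, compute_metrics)
# trainer.train()
# metrics = trainer.evaluate()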
Generate missing documentation strings |
import cache
def save_query(client_id, query):
cache.put("l:%s" % client_id, query)
def last_query(client_id):
return cache.get("l:%s" % client_id) | --- +++ @@ -1,10 +1,19 @@+"""
+Support for the stateful queries
+"""
import cache
def save_query(client_id, query):
+ """
+ Save the last query `query` for the client `client_id`
+ """
cache.put("l:%s" % client_id, query)
def last_query(client_id):
- return cache.get("l:%s" % client_id)+ """
+ Return the last query for the client `client_id`
+ """
+ return cache.get("l:%s" % client_id)
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/stateful_queries.py |
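A quick sketch, not part of the row above, of the round trip the two documented helpers provide; the client id and query strings are placeholders and the project's `cache` backend is assumed to be available:

# Hypothetical round trip for the documented save_query/last_query helpers.
import stateful_queries

stateful_queries.save_query("client-42", "python/read-file")   # remember the last query
assert stateful_queries.last_query("client-42") == "python/read-file"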
Write docstrings for utility functions |
import glob
import os
from .adapter import Adapter # pylint: disable=relative-import
def _get_filenames(path):
return [os.path.split(topic)[1] for topic in glob.glob(path)]
class RepositoryAdapter(Adapter):
def _get_list(self, prefix=None):
answer = _get_filenames(
os.path.join(
self.local_repository_location(),
self._cheatsheet_files_prefix,
"*" + self._cheatsheet_files_extension,
)
)
ext = self._cheatsheet_files_extension
if ext:
answer = [
filename[: -len(ext)] for filename in answer if filename.endswith(ext)
]
return answer
def _get_page(self, topic, request_options=None):
filename = os.path.join(
self.local_repository_location(), self._cheatsheet_files_prefix, topic
)
if os.path.exists(filename) and not os.path.isdir(filename):
answer = self._format_page(open(filename, "r").read())
else:
# though it should not happen
answer = "%s:%s not found" % (str(self.__class__), topic)
return answer
class GitRepositoryAdapter(RepositoryAdapter): # pylint: disable=abstract-method
@classmethod
def fetch_command(cls):
if not cls._repository_url:
return None
if not cls._repository_url.startswith("https://github.com/"):
# in this case `fetch` has to be implemented
# in the distinct adapter subclass
raise RuntimeError(
"Do not known how to handle this repository: %s" % cls._repository_url
)
local_repository_dir = cls.local_repository_location()
if not local_repository_dir:
return None
return ["git", "clone", "--depth=1", cls._repository_url, local_repository_dir]
@classmethod
def update_command(cls):
if not cls._repository_url:
return None
local_repository_dir = cls.local_repository_location()
if not local_repository_dir:
return None
if not cls._repository_url.startswith("https://github.com/"):
# in this case `update` has to be implemented
# in the distinct adapter subclass
raise RuntimeError(
"Do not known how to handle this repository: %s" % cls._repository_url
)
return ["git", "pull"]
@classmethod
def current_state_command(cls):
if not cls._repository_url:
return None
local_repository_dir = cls.local_repository_location()
if not local_repository_dir:
return None
if not cls._repository_url.startswith("https://github.com/"):
# in this case `update` has to be implemented
# in the distinct adapter subclass
raise RuntimeError(
"Do not known how to handle this repository: %s" % cls._repository_url
)
return ["git", "rev-parse", "--short", "HEAD", "--"]
@classmethod
def save_state(cls, state):
local_repository_dir = cls.local_repository_location()
state_filename = os.path.join(local_repository_dir, ".cached_revision")
open(state_filename, "wb").write(state)
@classmethod
def get_state(cls):
local_repository_dir = cls.local_repository_location()
state_filename = os.path.join(local_repository_dir, ".cached_revision")
state = None
if os.path.exists(state_filename):
state = open(state_filename, "r").read()
return state
@classmethod
def get_updates_list_command(cls):
current_state = cls.get_state()
if not current_state:
return ["git", "ls-tree", "--full-tree", "-r", "--name-only", "HEAD", "--"]
return ["git", "diff", "--name-only", current_state, "HEAD", "--"] | --- +++ @@ -1,3 +1,6 @@+"""
+Implementation of `GitRepositoryAdapter`, adapter that is used to handle git repositories
+"""
import glob
import os
@@ -10,8 +13,16 @@
class RepositoryAdapter(Adapter):
+ """
+ Implements methods needed to handle standard
+ repository based adapters.
+ """
def _get_list(self, prefix=None):
+ """
+ List of files in the cheat sheets directory
+ with the extension removed
+ """
answer = _get_filenames(
os.path.join(
@@ -45,9 +56,18 @@
class GitRepositoryAdapter(RepositoryAdapter): # pylint: disable=abstract-method
+ """
+ Implements all methods needed to handle cache handling
+ for git-repository-based adapters
+ """
@classmethod
def fetch_command(cls):
+ """
+ Initial fetch of the repository.
+ Return cmdline that has to be executed to fetch the repository.
+ Skipping if `self._repository_url` is not specified
+ """
if not cls._repository_url:
return None
@@ -67,6 +87,11 @@
@classmethod
def update_command(cls):
+ """
+ Update of the repository.
+ Return cmdline that has to be executed to update the repository
+ inside `local_repository_location()`.
+ """
if not cls._repository_url:
return None
@@ -86,6 +111,10 @@
@classmethod
def current_state_command(cls):
+ """
+ Get current state of repository (current revision).
+ This is used to find what cache entries should be invalidated.
+ """
if not cls._repository_url:
return None
@@ -105,12 +134,20 @@
@classmethod
def save_state(cls, state):
+ """
+ Save state `state` of the repository.
+ Must be called after the cache clean up.
+ """
local_repository_dir = cls.local_repository_location()
state_filename = os.path.join(local_repository_dir, ".cached_revision")
open(state_filename, "wb").write(state)
@classmethod
def get_state(cls):
+ """
+ Return the saved `state` of the repository.
+ If state cannot be read, return None
+ """
local_repository_dir = cls.local_repository_location()
state_filename = os.path.join(local_repository_dir, ".cached_revision")
@@ -121,7 +158,11 @@
@classmethod
def get_updates_list_command(cls):
+ """
+ Return list of updates since the last update whose id is saved as the repository state.
+ The list is used to invalidate the cache.
+ """
current_state = cls.get_state()
if not current_state:
return ["git", "ls-tree", "--full-tree", "-r", "--name-only", "HEAD", "--"]
- return ["git", "diff", "--name-only", current_state, "HEAD", "--"]+ return ["git", "diff", "--name-only", current_state, "HEAD", "--"]
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/adapter/git_adapter.py |
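The adapter methods documented in the row above only build command lines; the sketch below shows one way a caller might consume them. The driver function and the subprocess wiring are assumptions for illustration, not the project's actual fetch script:

# Hypothetical driver for the cmdline-returning methods documented above.
import os
import subprocess

def clone_or_update(adapter_cls):
    location = adapter_cls.local_repository_location()
    if not location:
        return  # adapter has no repository configured
    if os.path.exists(location):
        cmd = adapter_cls.update_command()   # e.g. ["git", "pull"], run inside the clone
        cwd = location
    else:
        cmd = adapter_cls.fetch_command()    # e.g. ["git", "clone", "--depth=1", url, location]
        cwd = None
    if cmd:
        subprocess.check_call(cmd, cwd=cwd)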
Document all public functions with docstrings |
import time
from globals import log
_WHITELIST = ["5.9.243.177"]
def _time_caps(minutes, hours, days):
return {
"min": minutes,
"hour": hours,
"day": days,
}
class Limits(object):
def __init__(self):
self.intervals = ["min", "hour", "day"]
self.divisor = _time_caps(60, 3600, 86400)
self.limit = _time_caps(30, 600, 1000)
self.last_update = _time_caps(0, 0, 0)
self.counter = {
"min": {},
"hour": {},
"day": {},
}
self._clear_counters_if_needed()
def _log_visit(self, interval, ip_address):
if ip_address not in self.counter[interval]:
self.counter[interval][ip_address] = 0
self.counter[interval][ip_address] += 1
def _limit_exceeded(self, interval, ip_address):
visits = self.counter[interval][ip_address]
limit = self._get_limit(interval)
return visits > limit
def _get_limit(self, interval):
return self.limit[interval]
def _report_excessive_visits(self, interval, ip_address):
log(
"%s LIMITED [%s for %s]" % (ip_address, self._get_limit(interval), interval)
)
def check_ip(self, ip_address):
if ip_address in _WHITELIST:
return None
self._clear_counters_if_needed()
for interval in self.intervals:
self._log_visit(interval, ip_address)
if self._limit_exceeded(interval, ip_address):
self._report_excessive_visits(interval, ip_address)
return "Not so fast! Number of queries per %s is limited to %s" % (
interval,
self._get_limit(interval),
)
return None
def reset(self):
for interval in self.intervals:
self.counter[interval] = {}
def _clear_counters_if_needed(self):
current_time = int(time.time())
for interval in self.intervals:
if current_time // self.divisor[interval] != self.last_update[interval]:
self.counter[interval] = {}
self.last_update[interval] = current_time / self.divisor[interval] | --- +++ @@ -1,3 +1,20 @@+"""
+Connection limitation.
+
+Number of connections from one IP is limited.
+We have nothing against scripting and automated queries.
+Even the opposite, we encourage them. But there are some
+connection limits that even we can't handle.
+Currently the limits are quite restrictive, but they will be relaxed
+in the future.
+
+Usage:
+
+ limits = Limits()
+ not_allowed = limits.check_ip(ip_address)
+ if not_allowed:
+ return "ERROR: %s" % not_allowed
+"""
import time
from globals import log
@@ -14,6 +31,13 @@
class Limits(object):
+ """
+ Queries limitation (by IP).
+
+ Exports:
+
+ check_ip(ip_address)
+ """
def __init__(self):
self.intervals = ["min", "hour", "day"]
@@ -49,6 +73,10 @@ )
def check_ip(self, ip_address):
+ """
+ Check if `ip_address` is allowed.
+ Return an error message string if a limit is exceeded, and None otherwise.
+ """
if ip_address in _WHITELIST:
return None
self._clear_counters_if_needed()
@@ -63,6 +91,9 @@ return None
def reset(self):
+ """
+ Reset all counters for all IPs
+ """
for interval in self.intervals:
self.counter[interval] = {}
@@ -71,4 +102,4 @@ for interval in self.intervals:
if current_time // self.divisor[interval] != self.last_update[interval]:
self.counter[interval] = {}
- self.last_update[interval] = current_time / self.divisor[interval]+ self.last_update[interval] = current_time / self.divisor[interval]
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/limits.py |
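To make the documented caps concrete, here is a small illustrative demonstration of the per-minute limit enforced by check_ip; the IP is a documentation placeholder and the module is assumed to be importable as `limits`:

# Hypothetical demonstration of the per-minute cap (limit["min"] == 30) documented above.
from limits import Limits

limits = Limits()
message = None
for _ in range(31):                       # the 31st call within the same minute exceeds the cap
    message = limits.check_ip("203.0.113.7")
print(message)                            # "Not so fast! Number of queries per min is limited to 30"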
Write docstrings for backend logic |
import abc
import os
from six import with_metaclass
from config import CONFIG
class AdapterMC(type):
def __repr__(cls):
if hasattr(cls, "_class_repr"):
return getattr(cls, "_class_repr")()
return super(AdapterMC, cls).__repr__()
class Adapter(with_metaclass(AdapterMC, object)):
_adapter_name = None
_output_format = "code"
_cache_needed = False
_repository_url = None
_local_repository_location = None
_cheatsheet_files_prefix = ""
_cheatsheet_files_extension = ""
_pages_list = []
@classmethod
def _class_repr(cls):
return "[Adapter: %s (%s)]" % (cls._adapter_name, cls.__name__)
def __init__(self):
self._list = {None: self._get_list()}
@classmethod
def name(cls):
return cls._adapter_name
@abc.abstractmethod
def _get_list(self, prefix=None):
return self._pages_list
def get_list(self, prefix=None):
if prefix in self._list:
return self._list[prefix]
self._list[prefix] = set(self._get_list(prefix=prefix))
return self._list[prefix]
def is_found(self, topic):
return topic in self._list[None]
def is_cache_needed(self):
return self._cache_needed
@staticmethod
def _format_page(text):
return text
@abc.abstractmethod
def _get_page(self, topic, request_options=None):
pass
def _get_output_format(self, topic):
if "/" in topic:
subquery = topic.split("/")[-1]
else:
subquery = topic
if subquery in [":list"]:
return "text"
return self._output_format
# pylint: disable=unused-argument
@staticmethod
def _get_filetype(topic):
return None
def get_page_dict(self, topic, request_options=None):
#
# if _get_page() returns a dict, use the dictionary
# for the answer. It is possible to specify some
# useful properties as the part of the answer
# (e.g. "cache")
# answer by _get_page() always overrides all default properties
#
answer = self._get_page(topic, request_options=request_options)
if not isinstance(answer, dict):
answer = {"answer": answer}
answer_dict = {
"topic": topic,
"topic_type": self._adapter_name,
"format": self._get_output_format(topic),
"cache": self._cache_needed,
}
answer_dict.update(answer)
# pylint: disable=assignment-from-none
filetype = self._get_filetype(topic)
if filetype:
answer_dict["filetype"] = filetype
return answer_dict
@classmethod
def local_repository_location(cls, cheat_sheets_location=False):
dirname = None
if cls._local_repository_location:
dirname = cls._local_repository_location
if not dirname and cls._repository_url:
dirname = cls._repository_url
if dirname.startswith("https://"):
dirname = dirname[8:]
elif dirname.startswith("http://"):
dirname = dirname[7:]
# if we did not manage to find out dirname up to this point,
# that means that neither repository url, nor repository location
# is specified for the adapter, so it should be skipped
if not dirname:
return None
if dirname.startswith("/"):
return dirname
# it is possible that several repositories will
# be mapped to the same location name
# (because only the last part of the path is used)
# in this case provide the name in _local_repository_location
# (detected by fetch.py)
if "/" in dirname:
dirname = dirname.split("/")[-1]
path = os.path.join(CONFIG["path.repositories"], dirname)
if cheat_sheets_location:
path = os.path.join(path, cls._cheatsheet_files_prefix)
return path
@classmethod
def repository_url(cls):
return cls._repository_url
@classmethod
def fetch_command(cls):
if not cls._repository_url:
return None
# in this case `fetch` has to be implemented
# in the distinct adapter subclass
raise RuntimeError(
"Do not known how to handle this repository: %s" % cls._repository_url
)
@classmethod
def update_command(cls):
if not cls._repository_url:
return None
local_repository_dir = cls.local_repository_location()
if not local_repository_dir:
return None
# in this case `update` has to be implemented
# in the distinct adapter subclass
raise RuntimeError(
"Do not known how to handle this repository: %s" % cls._repository_url
)
@classmethod
def current_state_command(cls):
if not cls._repository_url:
return None
local_repository_dir = cls.local_repository_location()
if not local_repository_dir:
return None
# in this case `update` has to be implemented
# in the distinct adapter subclass
raise RuntimeError(
"Do not known how to handle this repository: %s" % cls._repository_url
)
@classmethod
def save_state(cls, state):
local_repository_dir = cls.local_repository_location()
state_filename = os.path.join(local_repository_dir, ".cached_revision")
open(state_filename, "w").write(state)
@classmethod
def get_state(cls):
local_repository_dir = cls.local_repository_location()
state_filename = os.path.join(local_repository_dir, ".cached_revision")
state = None
if os.path.exists(state_filename):
state = open(state_filename, "r").read()
return state
@classmethod
def get_updates_list_command(cls):
return None
@classmethod
def get_updates_list(cls, updated_files_list):
if not cls._cheatsheet_files_prefix:
return updated_files_list
answer = []
cut_len = len(cls._cheatsheet_files_prefix)
for entry in updated_files_list:
if entry.startswith(cls._cheatsheet_files_prefix):
answer.append(entry[cut_len:])
else:
answer.append(entry)
return answer
def all_adapters(as_dict=False):
def _all_subclasses(cls):
return set(cls.__subclasses__()).union(
set([s for c in cls.__subclasses__() for s in _all_subclasses(c)])
)
if as_dict:
return {x.name(): x for x in _all_subclasses(Adapter)}
return list(_all_subclasses(Adapter))
def adapter_by_name(name):
return all_adapters(as_dict=True).get(name) | --- +++ @@ -1,3 +1,10 @@+"""
+`Adapter`, base class of the adapters.
+
+Configuration parameters:
+
+ path.repositories
+"""
import abc
import os
@@ -6,6 +13,10 @@
class AdapterMC(type):
+ """
+ Adapter Metaclass.
+ Defines string representation of adapters
+ """
def __repr__(cls):
if hasattr(cls, "_class_repr"):
@@ -14,6 +25,21 @@
class Adapter(with_metaclass(AdapterMC, object)):
+ """
+ An abstract class, defines methods:
+
+ (cheat sheets retrieval)
+ * get_list
+ * is_found
+ * is_cache_needed
+
+ (repositories management)
+ " fetch
+ * update
+
+ and several properties that have to be set in each adapter subclass.
+
+ """
_adapter_name = None
_output_format = "code"
@@ -33,6 +59,9 @@
@classmethod
def name(cls):
+ """
+ Return name of the adapter
+ """
return cls._adapter_name
@abc.abstractmethod
@@ -40,6 +69,9 @@ return self._pages_list
def get_list(self, prefix=None):
+ """
+ Return available pages for `prefix`
+ """
if prefix in self._list:
return self._list[prefix]
@@ -48,18 +80,34 @@ return self._list[prefix]
def is_found(self, topic):
+ """
+ check if `topic` is available
+ CAUTION: only root is checked
+ """
return topic in self._list[None]
def is_cache_needed(self):
+ """
+ Return True if answers should be cached.
+ Return False if answers should not be cached.
+ """
return self._cache_needed
@staticmethod
def _format_page(text):
+ """
+ Preformatting page hook.
+ Converts `text` (as in the initial repository)
+ to text (as to be displayed).
+ """
return text
@abc.abstractmethod
def _get_page(self, topic, request_options=None):
+ """
+ Return page for `topic`
+ """
pass
def _get_output_format(self, topic):
@@ -75,9 +123,15 @@ # pylint: disable=unused-argument
@staticmethod
def _get_filetype(topic):
+ """
+ Return language name (filetype) for `topic`
+ """
return None
def get_page_dict(self, topic, request_options=None):
+ """
+ Return page dict for `topic`
+ """
#
# if _get_page() returns a dict, use the dictionary
@@ -106,6 +160,17 @@
@classmethod
def local_repository_location(cls, cheat_sheets_location=False):
+ """
+ Return local repository location.
+ If `self._repository_url` is not specified for the class, return None.
+ It is possible that several adapters have the same repository_url;
+ in this case they should use the same local directory.
+ If for some reason the local repository location should be overridden
+ (e.g. if several different branches of the same repository are used),
+ it should be set in `self._local_repository_location` of the adapter.
+ If `cheat_sheets_location` is specified, return path of the cheat sheets
+ directory instead of the repository directory.
+ """
dirname = None
@@ -145,10 +210,18 @@
@classmethod
def repository_url(cls):
+ """
+ Return URL of the upstream repository
+ """
return cls._repository_url
@classmethod
def fetch_command(cls):
+ """
+ Initial fetch of the repository.
+ Return cmdline that has to be executed to fetch the repository.
+ Return None if `self._repository_url` is not specified.
+ """
if not cls._repository_url:
return None
@@ -160,6 +233,11 @@
@classmethod
def update_command(cls):
+ """
+ Update of the repository.
+ Return cmdline that has to be executed to update the repository
+ inside `local_repository_location()`.
+ """
if not cls._repository_url:
return None
@@ -176,6 +254,10 @@
@classmethod
def current_state_command(cls):
+ """
+ Get current state of repository (current revision).
+ This is used to find what cache entries should be invalidated.
+ """
if not cls._repository_url:
return None
@@ -192,12 +274,20 @@
@classmethod
def save_state(cls, state):
+ """
+ Save state `state` of the repository.
+ Must be called after the cache clean up.
+ """
local_repository_dir = cls.local_repository_location()
state_filename = os.path.join(local_repository_dir, ".cached_revision")
open(state_filename, "w").write(state)
@classmethod
def get_state(cls):
+ """
+ Return the saved `state` of the repository.
+ If state cannot be read, return None
+ """
local_repository_dir = cls.local_repository_location()
state_filename = os.path.join(local_repository_dir, ".cached_revision")
@@ -208,10 +298,19 @@
@classmethod
def get_updates_list_command(cls):
+ """
+ Return the command to get the list of updates
+ since the last update whose id is saved as the repository state (`cached_state`).
+ The list is used to invalidate the cache.
+ """
return None
@classmethod
def get_updates_list(cls, updated_files_list):
+ """
+ Return the pages that have to be invalidated if the files `updated_files_list`
+ were updated in the repository.
+ """
if not cls._cheatsheet_files_prefix:
return updated_files_list
@@ -226,6 +325,10 @@
def all_adapters(as_dict=False):
+ """
+ Return list of all known adapters
+ If `as_dict` is True, return dict {'name': adapter} instead of a list.
+ """
def _all_subclasses(cls):
return set(cls.__subclasses__()).union(
@@ -238,4 +341,8 @@
def adapter_by_name(name):
- return all_adapters(as_dict=True).get(name)+ """
+ Return adapter having this name,
+ or None if nothing found
+ """
+ return all_adapters(as_dict=True).get(name)
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/adapter/adapter.py |
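A minimal standalone sketch of the recursive subclass discovery that `all_adapters` relies on; `Base`, `Git` and `GitHub` below are toy stand-ins for the real adapter classes:

def _all_subclasses(cls):
    # collect direct subclasses plus, recursively, their subclasses
    return set(cls.__subclasses__()).union(
        s for c in cls.__subclasses__() for s in _all_subclasses(c)
    )

class Base: pass
class Git(Base): pass
class GitHub(Git): pass

print(sorted(c.__name__ for c in _all_subclasses(Base)))  # ['Git', 'GitHub']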
Write docstrings for data processing functions |
from __future__ import print_function
import os
from pygments.styles import get_all_styles
# def get_all_styles():
# return []
_ENV_VAR_PREFIX = "CHEATSH"
_MYDIR = os.path.abspath(os.path.join(__file__, "..", ".."))
def _config_locations():
var = _ENV_VAR_PREFIX + "_PATH_WORKDIR"
workdir = (
os.environ[var]
if var in os.environ
else os.path.join(os.environ["HOME"], ".cheat.sh")
)
var = _ENV_VAR_PREFIX + "_CONFIG"
conf_file_workdir = (
os.environ[var]
if var in os.environ
else os.path.join(workdir, "etc/config.yaml")
)
conf_file_mydir = os.path.join(_MYDIR, "etc/config.yaml")
return workdir, conf_file_workdir, conf_file_mydir
_WORKDIR, _CONF_FILE_WORKDIR, _CONF_FILE_MYDIR = _config_locations()
_CONFIG = {
"adapters.active": [
"tldr",
"cheat",
"fosdem",
"translation",
"rosetta",
"late.nz",
"question",
"cheat.sheets",
"cheat.sheets dir",
"learnxiny",
"rfc",
"oeis",
"chmod",
],
"adapters.mandatory": [
"search",
],
"cache.redis.db": 0,
"cache.redis.host": "localhost",
"cache.redis.port": 6379,
"cache.redis.prefix": "",
"cache.type": "redis",
"frontend.styles": sorted(list(get_all_styles())),
"log.level": 4,
"path.internal.ansi2html": os.path.join(_MYDIR, "share/ansi2html.sh"),
"path.internal.bin": os.path.join(_MYDIR, "bin"),
"path.internal.bin.upstream": os.path.join(_MYDIR, "bin", "upstream"),
"path.internal.malformed": os.path.join(
_MYDIR, "share/static/malformed-response.html"
),
"path.internal.pages": os.path.join(_MYDIR, "share"),
"path.internal.static": os.path.join(_MYDIR, "share/static"),
"path.internal.templates": os.path.join(_MYDIR, "share/templates"),
"path.internal.vim": os.path.join(_MYDIR, "share/vim"),
"path.log.main": "log/main.log",
"path.log.queries": "log/queries.log",
"path.log.fetch": "log/fetch.log",
"path.repositories": "upstream",
"path.spool": "spool",
"path.workdir": _WORKDIR,
"routing.pre": [
("^$", "search"),
("^[^/]*/rosetta(/|$)", "rosetta"),
("^rfc/", "rfc"),
("^oeis/", "oeis"),
("^chmod/", "chmod"),
("^:", "internal"),
("/:list$", "internal"),
("/$", "cheat.sheets dir"),
],
"routing.main": [
("", "cheat.sheets"),
("", "cheat"),
("", "tldr"),
("", "late.nz"),
("", "fosdem"),
("", "learnxiny"),
],
"routing.post": [
("^[^/ +]*$", "unknown"),
("^[a-z][a-z]-[a-z][a-z]$", "translation"),
],
"routing.default": "question",
"upstream.url": "https://cheat.sh",
"upstream.timeout": 5,
"search.limit": 20,
"server.bind": "0.0.0.0",
"server.port": 8002,
}
class Config(dict):
def _absolute_path(self, val):
if val.startswith("/"):
return val
return os.path.join(self["path.workdir"], val)
def __init__(self, *args, **kwargs):
dict.__init__(self)
self.update(*args, **kwargs)
def __setitem__(self, key, val):
if key.startswith("path.") and not val.startswith("/"):
val = self._absolute_path(val)
dict.__setitem__(self, key, val)
def update(self, *args, **kwargs):
newdict = dict(*args, **kwargs)
if "path.workdir" in newdict:
self["path.workdir"] = newdict["path.workdir"]
for key, val in newdict.items():
self[key] = val
def _load_config_from_environ(config):
update = {}
for key, val in config.items():
if not isinstance(val, str) or isinstance(val, int):
continue
env_var = _ENV_VAR_PREFIX + "_" + key.replace(".", "_").upper()
if not env_var in os.environ:
continue
env_val = os.environ[env_var]
if isinstance(val, int):
try:
env_val = int(env_val)
except (ValueError, TypeError):
continue
update[key] = env_val
return update
def _get_nested(data, key):
if not data or not isinstance(data, dict):
return None
if "." not in key:
return data.get(key)
if key in data:
return data[key]
parts = key.split(".")
for i in range(len(parts))[::-1]:
prefix = ".".join(parts[:i])
if prefix in data:
return _get_nested(data[prefix], ".".join(parts[i:]))
return None
def _load_config_from_file(default_config, filename):
import yaml
update = {}
if not os.path.exists(filename):
return update
with open(filename) as f:
newconfig = yaml.load(f.read(), Loader=yaml.SafeLoader)
for key, val in default_config.items():
newval = _get_nested(newconfig, key)
if newval is None:
continue
if isinstance(val, int):
try:
newval = int(newval)
except (ValueError, TypeError):
continue
update[key] = newval
return update
CONFIG = Config()
CONFIG.update(_CONFIG)
CONFIG.update(_load_config_from_file(_CONFIG, _CONF_FILE_MYDIR))
if _CONF_FILE_WORKDIR != _CONF_FILE_MYDIR:
CONFIG.update(_load_config_from_file(_CONFIG, _CONF_FILE_WORKDIR))
CONFIG.update(_load_config_from_environ(_CONFIG))
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,45 @@+"""
+Global configuration of the project.
+
+All configurable parameters are stored in the global variable CONFIG,
+the only variable which is exported from the module.
+
+Default values of all configuration parameters are specified
+in the `_CONFIG` dictionary. Those parameters can be overridden
+by three means:
+ * config file `etc/config.yaml` located in the work dir
+ * config file `etc/config.yaml` located in the project dir
+ (if the work dir and the project dir are not the same)
+ * environment variables prefixed with `CHEATSH_`
+
+Configuration placement priorities, from high to low:
+ * environment variables;
+ * configuration file in the workdir
+ * configuration file in the project dir
+ * default values specified in the `_CONFIG` dictionary
+
+If the work dir and the project dir are not the same, we do not
+recommend using the config file located in the project dir,
+except when you use your own cheat.sh fork and the configuration
+is therefore part of the project repository.
+In all other cases `WORKDIR/etc/config.yaml` should be preferred.
+Location of this config file can be overridden by the `CHEATSH_CONFIG`
+environment variable.
+
+Configuration parameters set by environment variables are mapped
+in this way:
+ * CHEATSH_ prefix is trimmed
+ * _ replaced with .
+ * the string is lowercased
+
+For instance, an environment variable named `CHEATSH_SERVER_PORT`
+specifies the value for the `server.port` configuration parameter.
+
+Only parameters that imply scalar values (integer or string)
+can be set using environment variables; for the rest, config files
+should be used. If a parameter implies an integer, and the value
+specified by an environment variable is not an integer, it is ignored.
+"""
from __future__ import print_function
import os
@@ -13,6 +55,11 @@
def _config_locations():
+ """
+ Return the three locations used for configuration:
+ the workdir and the two possible config files, i.e.
+ * `_WORKDIR`, `_CONF_FILE_WORKDIR`, `_CONF_FILE_MYDIR`
+ """
var = _ENV_VAR_PREFIX + "_PATH_WORKDIR"
workdir = (
@@ -108,6 +155,10 @@
class Config(dict):
+ """
+ configuration dictionary that handles relative
+ paths properly (making them relative to path.workdir)
+ """
def _absolute_path(self, val):
if val.startswith("/"):
@@ -124,6 +175,11 @@ dict.__setitem__(self, key, val)
def update(self, *args, **kwargs):
+ """
+ the built-in __init__ doesn't call update,
+ and the built-in update doesn't call __setitem__,
+ so `update` should be overridden
+ """
newdict = dict(*args, **kwargs)
if "path.workdir" in newdict:
@@ -157,6 +213,19 @@
def _get_nested(data, key):
+ """
+ Return value for a hierarchical key (like a.b.c).
+ Return None if nothing found.
+ If there is a key with . in the name, and a subdictionary,
+ the former is preferred:
+
+ >>> print(_get_nested({'a.b': 10, 'a':{'b': 20}}, 'a.b'))
+ 10
+ >>> print(_get_nested({'a': {'b': 20}}, 'a.b'))
+ 20
+ >>> print(_get_nested({'a': {'b': {'c': 30}}}, 'a.b.c'))
+ 30
+ """
if not data or not isinstance(data, dict):
return None
@@ -209,4 +278,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/config.py |
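A small standalone sketch of the CHEATSH_* environment-variable mapping described in the module docstring above (the port value is a hypothetical override):

import os

_ENV_VAR_PREFIX = "CHEATSH"

def key_to_env_var(key):
    # server.port -> CHEATSH_SERVER_PORT, mirroring _load_config_from_environ
    return _ENV_VAR_PREFIX + "_" + key.replace(".", "_").upper()

os.environ[key_to_env_var("server.port")] = "8003"  # hypothetical override
print(key_to_env_var("server.port"))           # CHEATSH_SERVER_PORT
print(int(os.environ["CHEATSH_SERVER_PORT"]))  # 8003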
Generate missing documentation strings |
import os
import json
from config import CONFIG
_REDIS = None
if CONFIG["cache.type"] == "redis":
import redis
_REDIS = redis.Redis(
host=CONFIG["cache.redis.host"],
port=CONFIG["cache.redis.port"],
db=CONFIG["cache.redis.db"],
)
_REDIS_PREFIX = ""
if CONFIG.get("cache.redis.prefix", ""):
_REDIS_PREFIX = CONFIG["cache.redis.prefix"] + ":"
def put(key, value):
if _REDIS_PREFIX:
key = _REDIS_PREFIX + key
if CONFIG["cache.type"] == "redis" and _REDIS:
if isinstance(value, (dict, list)):
value = json.dumps(value)
_REDIS.set(key, value)
def get(key):
if _REDIS_PREFIX:
key = _REDIS_PREFIX + key
if CONFIG["cache.type"] == "redis" and _REDIS:
value = _REDIS.get(key)
try:
value = json.loads(value)
except (ValueError, TypeError):
pass
return value
return None
def delete(key):
if _REDIS:
if _REDIS_PREFIX:
key = _REDIS_PREFIX + key
_REDIS.delete(key)
return None | --- +++ @@ -1,3 +1,16 @@+"""
+Cache implementation.
+Currently only two types of cache are allowed:
+ * "none" cache switched off
+ * "redis" use redis for cache
+
+Configuration parameters:
+
+ cache.type = redis | none
+ cache.redis.db
+ cache.redis.host
+ cache.redis.port
+"""
import os
import json
@@ -19,6 +32,9 @@
def put(key, value):
+ """
+ Save `value` with `key`, and serialize it if needed
+ """
if _REDIS_PREFIX:
key = _REDIS_PREFIX + key
@@ -31,6 +47,9 @@
def get(key):
+ """
+ Read `value` by `key`, and deserialize it if needed
+ """
if _REDIS_PREFIX:
key = _REDIS_PREFIX + key
@@ -46,6 +65,9 @@
def delete(key):
+ """
+ Remove `key` from the database
+ """
if _REDIS:
if _REDIS_PREFIX:
@@ -53,4 +75,4 @@
_REDIS.delete(key)
- return None+ return None
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/cache.py |
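A rough sketch of the put/get JSON round-trip shown above, with a plain dict standing in for the Redis backend:

import json

_STORE = {}  # in-memory stand-in for Redis

def put(key, value):
    if isinstance(value, (dict, list)):  # serialize structured values, as cache.put() does
        value = json.dumps(value)
    _STORE[key] = value

def get(key):
    value = _STORE.get(key)
    try:
        value = json.loads(value)  # deserialize if it was JSON
    except (ValueError, TypeError):
        pass
    return value

put("q:python/read json", {"answer": "use json.load()", "format": "text+code"})
print(get("q:python/read json")["answer"])  # use json.load()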
Add well-formatted docstrings |
import re
import json
from routing import get_answers, get_topics_list
from search import find_answers_by_keyword
from languages_data import LANGUAGE_ALIAS, rewrite_editor_section_name
import postprocessing
import frontend.html
import frontend.ansi
def _add_section_name(query):
# temporary solution until we find a proper one
if " " not in query and "+" not in query:
return query
if "/" in query:
return query
if " " in query:
return re.sub(r" +", "/", query, count=1)
if "+" in query:
# replace only single + to avoid catching g++ and friends
return re.sub(r"([^\+])\+([^\+])", r"\1/\2", query, count=1)
def cheat_wrapper(query, request_options=None, output_format="ansi"):
def _rewrite_aliases(word):
if word == ":bash.completion":
return ":bash_completion"
return word
def _rewrite_section_name(query):
if "/" not in query:
return query
section_name, rest = query.split("/", 1)
if ":" in section_name:
section_name = rewrite_editor_section_name(section_name)
section_name = LANGUAGE_ALIAS.get(section_name, section_name)
return "%s/%s" % (section_name, rest)
def _sanitize_query(query):
return re.sub('[<>"]', "", query)
def _strip_hyperlink(query):
return re.sub("(,[0-9]+)+$", "", query)
def _parse_query(query):
topic = query
keyword = None
search_options = ""
keyword = None
if "~" in query:
topic = query
pos = topic.index("~")
keyword = topic[pos + 1 :]
topic = topic[:pos]
if "/" in keyword:
search_options = keyword[::-1]
search_options = search_options[: search_options.index("/")]
keyword = keyword[: -len(search_options) - 1]
return topic, keyword, search_options
query = _sanitize_query(query)
query = _add_section_name(query)
query = _rewrite_aliases(query)
query = _rewrite_section_name(query)
# at the moment, we just remove trailing slashes
# so queries python/ and python are equal
# query = _strip_hyperlink(query.rstrip('/'))
topic, keyword, search_options = _parse_query(query)
if keyword:
answers = find_answers_by_keyword(
topic, keyword, options=search_options, request_options=request_options
)
else:
answers = get_answers(topic, request_options=request_options)
answers = [
postprocessing.postprocess(
answer, keyword, search_options, request_options=request_options
)
for answer in answers
]
answer_data = {
"query": query,
"keyword": keyword,
"answers": answers,
}
if output_format == "html":
answer_data["topics_list"] = get_topics_list()
return frontend.html.visualize(answer_data, request_options)
elif output_format == "json":
return json.dumps(answer_data, indent=4)
return frontend.ansi.visualize(answer_data, request_options) | --- +++ @@ -1,3 +1,12 @@+"""
+Main cheat.sh wrapper.
+Parse the query, get answers from getters (using get_answers),
+visualize them using frontends and return the result.
+
+Exports:
+
+ cheat_wrapper()
+"""
import re
import json
@@ -25,6 +34,11 @@
def cheat_wrapper(query, request_options=None, output_format="ansi"):
+ """
+ Function that delivers the cheat sheet for `query`.
+ If `output_format` is "html", the answer is formatted as HTML.
+ Additional request options are specified in `request_options`.
+ """
def _rewrite_aliases(word):
if word == ":bash.completion":
@@ -32,6 +46,10 @@ return word
def _rewrite_section_name(query):
+ """
+ Rewriting special section names:
+ * EDITOR:NAME => emacs:go-mode
+ """
if "/" not in query:
return query
@@ -104,4 +122,4 @@ return frontend.html.visualize(answer_data, request_options)
elif output_format == "json":
return json.dumps(answer_data, indent=4)
- return frontend.ansi.visualize(answer_data, request_options)+ return frontend.ansi.visualize(answer_data, request_options)
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/cheat_wrapper.py |
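A standalone copy of the `_parse_query` helper above, illustrating how a "topic~keyword/options" query is split (the query string itself is made up):

def parse_query(query):
    topic, keyword, search_options = query, None, ""
    if "~" in query:
        pos = query.index("~")
        topic, keyword = query[:pos], query[pos + 1:]
        if "/" in keyword:
            # options are everything after the last "/" in the keyword part
            search_options = keyword[::-1]
            search_options = search_options[:search_options.index("/")]
            keyword = keyword[:-len(search_options) - 1]
    return topic, keyword, search_options

print(parse_query("python/lambda~map/i"))  # ('python/lambda', 'map', 'i')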
Write docstrings for this repository |
import string
import os
import random
from config import CONFIG
def _save_cheatsheet(topic_name, cheatsheet):
nonce = "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(9)
)
filename = topic_name.replace("/", ".") + "." + nonce
filename = os.path.join(CONFIG["path.spool"], filename)
open(filename, "w").write(cheatsheet)
def process_post_request(req, topic):
for key, val in req.form.items():
if key == "":
if topic is None:
topic_name = "UNNAMED"
else:
topic_name = topic
cheatsheet = val
else:
if val == "":
if topic is None:
topic_name = "UNNAMED"
else:
topic_name = topic
cheatsheet = key
else:
topic_name = key
cheatsheet = val
_save_cheatsheet(topic_name, cheatsheet) | --- +++ @@ -1,3 +1,11 @@+"""
+POST requests processing.
+Currently used only for new cheat sheets submission.
+
+Configuration parameters:
+
+ path.spool
+"""
import string
import os
@@ -6,6 +14,10 @@
def _save_cheatsheet(topic_name, cheatsheet):
+ """
+ Save posted cheat sheet `cheatsheet` with `topic_name`
+ in the spool directory
+ """
nonce = "".join(
random.choice(string.ascii_uppercase + string.digits) for _ in range(9)
@@ -17,6 +29,9 @@
def process_post_request(req, topic):
+ """
+ Process POST request `req`.
+ """
for key, val in req.form.items():
if key == "":
if topic is None:
@@ -35,4 +50,4 @@ topic_name = key
cheatsheet = val
- _save_cheatsheet(topic_name, cheatsheet)+ _save_cheatsheet(topic_name, cheatsheet)
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/post.py |
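A short sketch of how `_save_cheatsheet` builds spool file names; "/tmp/spool" below is a hypothetical stand-in for CONFIG["path.spool"]:

import os
import random
import string

def spool_filename(topic_name, spool_dir="/tmp/spool"):
    # slashes in the topic become dots, followed by a 9-character random nonce
    nonce = "".join(
        random.choice(string.ascii_uppercase + string.digits) for _ in range(9)
    )
    return os.path.join(spool_dir, topic_name.replace("/", ".") + "." + nonce)

print(spool_filename("python/some-sheet"))  # e.g. /tmp/spool/python.some-sheet.7K2Q9XDNM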
Replace inline comments with docstrings | #!/usr/bin/env python
# vim: set encoding=utf-8
# pylint: disable=wrong-import-position,wrong-import-order
from __future__ import print_function
import sys
if sys.version_info[0] < 3:
reload(sys)
sys.setdefaultencoding("utf8")
import sys
import logging
import os
import requests
import jinja2
from flask import Flask, request, send_from_directory, redirect, Response
sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", "lib")))
from config import CONFIG
from limits import Limits
from cheat_wrapper import cheat_wrapper
from post import process_post_request
from options import parse_args
from stateful_queries import save_query, last_query
if not os.path.exists(os.path.dirname(CONFIG["path.log.main"])):
os.makedirs(os.path.dirname(CONFIG["path.log.main"]))
logging.basicConfig(
filename=CONFIG["path.log.main"],
level=logging.DEBUG,
format="%(asctime)s %(message)s",
)
# Fix Flask "exception and request logging" to `stderr`.
#
# When Flask's werkzeug detects that logging is already set, it
# doesn't add its own logger that prints exceptions.
stderr_handler = logging.StreamHandler()
logging.getLogger().addHandler(stderr_handler)
#
# Alter log format to distinguish log lines from everything else
stderr_handler.setFormatter(logging.Formatter("%(filename)s:%(lineno)s: %(message)s"))
#
# Sometimes werkzeug starts logging before an app is imported
# (https://github.com/pallets/werkzeug/issues/1969)
# resulting in duplicated lines. In that case we need the root
# stderr handler to skip lines from werkzeug.
class SkipFlaskLogger(object):
def filter(self, record):
if record.name != "werkzeug":
return True
if logging.getLogger("werkzeug").handlers:
stderr_handler.addFilter(SkipFlaskLogger())
app = Flask(__name__) # pylint: disable=invalid-name
app.jinja_loader = jinja2.ChoiceLoader(
[app.jinja_loader, jinja2.FileSystemLoader(CONFIG["path.internal.templates"])]
)
LIMITS = Limits()
PLAIN_TEXT_AGENTS = [
"curl",
"httpie",
"lwp-request",
"wget",
"python-requests",
"openbsd ftp",
"powershell",
"fetch",
"aiohttp",
"xh",
]
def _is_html_needed(user_agent):
return all([x not in user_agent for x in PLAIN_TEXT_AGENTS])
def is_result_a_script(query):
return query in [":cht.sh"]
@app.route("/files/<path:path>")
def send_static(path):
return send_from_directory(CONFIG["path.internal.static"], path)
@app.route("/favicon.ico")
def send_favicon():
return send_from_directory(CONFIG["path.internal.static"], "favicon.ico")
@app.route("/malformed-response.html")
def send_malformed():
dirname, filename = os.path.split(CONFIG["path.internal.malformed"])
return send_from_directory(dirname, filename)
def log_query(ip_addr, found, topic, user_agent):
log_entry = "%s %s %s %s\n" % (ip_addr, found, topic, user_agent)
with open(CONFIG["path.log.queries"], "ab") as my_file:
my_file.write(log_entry.encode("utf-8"))
def get_request_ip(req):
if req.headers.getlist("X-Forwarded-For"):
ip_addr = req.headers.getlist("X-Forwarded-For")[0]
if ip_addr.startswith("::ffff:"):
ip_addr = ip_addr[7:]
else:
ip_addr = req.remote_addr
if req.headers.getlist("X-Forwarded-For"):
ip_addr = req.headers.getlist("X-Forwarded-For")[0]
if ip_addr.startswith("::ffff:"):
ip_addr = ip_addr[7:]
else:
ip_addr = req.remote_addr
return ip_addr
def get_answer_language(request):
def _parse_accept_language(accept_language):
languages = accept_language.split(",")
locale_q_pairs = []
for language in languages:
try:
if language.split(";")[0] == language:
# no q => q = 1
locale_q_pairs.append((language.strip(), "1"))
else:
locale = language.split(";")[0].strip()
weight = language.split(";")[1].split("=")[1]
locale_q_pairs.append((locale, weight))
except IndexError:
pass
return locale_q_pairs
def _find_supported_language(accepted_languages):
for lang_tuple in accepted_languages:
lang = lang_tuple[0]
if "-" in lang:
lang = lang.split("-", 1)[0]
return lang
return None
lang = None
hostname = request.headers["Host"]
if hostname.endswith(".cheat.sh"):
lang = hostname[:-9]
if "lang" in request.args:
lang = request.args.get("lang")
header_accept_language = request.headers.get("Accept-Language", "")
if lang is None and header_accept_language:
lang = _find_supported_language(_parse_accept_language(header_accept_language))
return lang
def _proxy(*args, **kwargs):
# print "method=", request.method,
# print "url=", request.url.replace('/:shell-x/', ':3000/')
# print "headers=", {key: value for (key, value) in request.headers if key != 'Host'}
# print "data=", request.get_data()
# print "cookies=", request.cookies
# print "allow_redirects=", False
url_before, url_after = request.url.split("/:shell-x/", 1)
url = url_before + ":3000/"
if "q" in request.args:
url_after = "?" + "&".join("arg=%s" % x for x in request.args["q"].split())
url += url_after
print(url)
print(request.get_data())
resp = requests.request(
method=request.method,
url=url,
headers={key: value for (key, value) in request.headers if key != "Host"},
data=request.get_data(),
cookies=request.cookies,
allow_redirects=False,
)
excluded_headers = [
"content-encoding",
"content-length",
"transfer-encoding",
"connection",
]
headers = [
(name, value)
for (name, value) in resp.raw.headers.items()
if name.lower() not in excluded_headers
]
response = Response(resp.content, resp.status_code, headers)
return response
@app.route("/", methods=["GET", "POST"])
@app.route("/<path:topic>", methods=["GET", "POST"])
def answer(topic=None):
user_agent = request.headers.get("User-Agent", "").lower()
html_needed = _is_html_needed(user_agent)
options = parse_args(request.args)
if topic in [
"apple-touch-icon-precomposed.png",
"apple-touch-icon.png",
"apple-touch-icon-120x120-precomposed.png",
] or (topic is not None and any(topic.endswith("/" + x) for x in ["favicon.ico"])):
return ""
request_id = request.cookies.get("id")
if topic is not None and topic.lstrip("/") == ":last":
if request_id:
topic = last_query(request_id)
else:
return "ERROR: you have to set id for your requests to use /:last\n"
else:
if request_id:
save_query(request_id, topic)
if request.method == "POST":
process_post_request(request, html_needed)
if html_needed:
return redirect("/")
return "OK\n"
if "topic" in request.args:
return redirect("/%s" % request.args.get("topic"))
if topic is None:
topic = ":firstpage"
if topic.startswith(":shell-x/"):
return _proxy()
# return requests.get('http://127.0.0.1:3000'+topic[8:]).text
lang = get_answer_language(request)
if lang:
options["lang"] = lang
ip_address = get_request_ip(request)
if "+" in topic:
not_allowed = LIMITS.check_ip(ip_address)
if not_allowed:
return "429 %s\n" % not_allowed, 429
html_is_needed = _is_html_needed(user_agent) and not is_result_a_script(topic)
if html_is_needed:
output_format = "html"
else:
output_format = "ansi"
result, found = cheat_wrapper(
topic, request_options=options, output_format=output_format
)
if "Please come back in several hours" in result and html_is_needed:
malformed_response = open(
os.path.join(CONFIG["path.internal.malformed"])
).read()
return malformed_response
log_query(ip_address, found, topic, user_agent)
if html_is_needed:
return result
return Response(result, mimetype="text/plain") | --- +++ @@ -2,6 +2,17 @@ # vim: set encoding=utf-8
# pylint: disable=wrong-import-position,wrong-import-order
+"""
+Main server program.
+
+Configuration parameters:
+
+ path.internal.malformed
+ path.internal.static
+ path.internal.templates
+ path.log.main
+ path.log.queries
+"""
from __future__ import print_function
@@ -83,6 +94,9 @@
def _is_html_needed(user_agent):
+ """
+ Based on `user_agent`, return whether it needs HTML or ANSI
+ """
return all([x not in user_agent for x in PLAIN_TEXT_AGENTS])
@@ -92,27 +106,45 @@
@app.route("/files/<path:path>")
def send_static(path):
+ """
+ Return static file `path`.
+ Can be served by the HTTP frontend.
+ """
return send_from_directory(CONFIG["path.internal.static"], path)
@app.route("/favicon.ico")
def send_favicon():
+ """
+ Return static file `favicon.ico`.
+ Can be served by the HTTP frontend.
+ """
return send_from_directory(CONFIG["path.internal.static"], "favicon.ico")
@app.route("/malformed-response.html")
def send_malformed():
+ """
+ Return static file `malformed-response.html`.
+ Can be served by the HTTP frontend.
+ """
dirname, filename = os.path.split(CONFIG["path.internal.malformed"])
return send_from_directory(dirname, filename)
def log_query(ip_addr, found, topic, user_agent):
+ """
+ Log processed query and some internal data
+ """
log_entry = "%s %s %s %s\n" % (ip_addr, found, topic, user_agent)
with open(CONFIG["path.log.queries"], "ab") as my_file:
my_file.write(log_entry.encode("utf-8"))
def get_request_ip(req):
+ """
+ Extract IP address from `req`
+ """
if req.headers.getlist("X-Forwarded-For"):
ip_addr = req.headers.getlist("X-Forwarded-For")[0]
@@ -131,6 +163,10 @@
def get_answer_language(request):
+ """
+ Return preferred answer language based on
+ domain name, query arguments and headers
+ """
def _parse_accept_language(accept_language):
languages = accept_language.split(",")
@@ -218,6 +254,17 @@ @app.route("/", methods=["GET", "POST"])
@app.route("/<path:topic>", methods=["GET", "POST"])
def answer(topic=None):
+ """
+ Main rendering function; it processes incoming cheat sheet queries.
+ Depending on user agent it returns output in HTML or ANSI format.
+
+ Incoming data:
+ request.args
+ request.headers
+ request.remote_addr
+ request.referrer
+ request.query_string
+ """
user_agent = request.headers.get("User-Agent", "").lower()
html_needed = _is_html_needed(user_agent)
@@ -283,4 +330,4 @@ log_query(ip_address, found, topic, user_agent)
if html_is_needed:
return result
- return Response(result, mimetype="text/plain")+ return Response(result, mimetype="text/plain")
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/bin/app.py |
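A standalone sketch of the Accept-Language parsing performed inside get_answer_language() above (the header value is just an example):

def parse_accept_language(accept_language):
    locale_q_pairs = []
    for language in accept_language.split(","):
        try:
            if language.split(";")[0] == language:
                locale_q_pairs.append((language.strip(), "1"))  # no q => q = 1
            else:
                locale = language.split(";")[0].strip()
                weight = language.split(";")[1].split("=")[1]
                locale_q_pairs.append((locale, weight))
        except IndexError:
            pass
    return locale_q_pairs

print(parse_accept_language("de-DE,de;q=0.9,en;q=0.7"))
# [('de-DE', '1'), ('de', '0.9'), ('en', '0.7')]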
Generate docstrings for this script | # vim: encoding=utf-8
import os
import sys
import colored
import itertools
from globals import MYDIR
"""
Once panela is ready for it, it will be split out into a separate project
that will be used for all chubin's console services.
There are several features that are not yet implemented (see __doc__ in Panela)
TODO:
* html output
* png output
"""
from wcwidth import wcswidth
from colors import find_nearest_color, HEX_TO_ANSI, rgb_from_str
import pyte
# http://stackoverflow.com/questions/19782975/convert-rgb-color-to-the-nearest-color-in-palette-web-safe-color
try:
basestring # Python 2
except NameError:
basestring = str # Python 3
def color_mapping(clr):
if clr == "default":
return None
return clr
class Point(object):
def __init__(self, char=None, foreground=None, background=None):
self.foreground = foreground
self.background = background
self.char = char
class Panela:
def __init__(self, x=80, y=25, panela=None, field=None):
if panela:
self.field = [x for x in panela.field]
self.size_x = panela.size_x
self.size_y = panela.size_y
return
if field:
self.field = field
self.size_x = len(field[0])
self.size_y = len(field)
return
self.field = [[Point() for _ in range(x)] for _ in range(y)]
self.size_x = x
self.size_y = y
def in_field(self, col, row):
if col < 0:
return False
if row < 0:
return False
if col >= self.size_x:
return False
if row >= self.size_y:
return False
return True
#
# Blocks manipulation
#
def copy(self, x1, y1, x2, y2):
if x1 < 0:
x1 += self.size_x
if x2 < 0:
x2 += self.size_x
if x1 > x2:
x1, x2 = x2, x1
if y1 < 0:
y1 += self.size_y
if y2 < 0:
y2 += self.size_y
if y1 > y2:
y1, y2 = y2, y1
field = [self.field[i] for i in range(y1, y2 + 1)]
field = [line[x1 : x2 + 1] for line in field]
return Panela(field=field)
def cut(self, x1, y1, x2, y2):
if x1 < 0:
x1 += self.size_x
if x2 < 0:
x2 += self.size_x
if x1 > x2:
x1, x2 = x2, x1
if y1 < 0:
y1 += self.size_y
if y2 < 0:
y2 += self.size_y
if y1 > y2:
y1, y2 = y2, y1
copied = self.copy(x1, y1, x2, y2)
for y in range(y1, y2 + 1):
for x in range(x1, x2 + 1):
self.field[y][x] = Point()
return copied
def extend(self, cols=None, rows=None):
if cols and cols > 0:
self.field = [x + [Point() for _ in range(cols)] for x in self.field]
self.size_x += cols
if rows and rows > 0:
self.field = self.field + [
[Point() for _ in range(self.size_x)] for _ in range(rows)
]
self.size_y += rows
def crop(self, left=None, right=None, top=None, bottom=None):
if left:
if left >= self.size_x:
left = self.size_x
self.field = [x[left:] for x in self.field]
self.size_x -= left
if right:
if right >= self.size_x:
right = self.size_x
self.field = [x[:-right] for x in self.field]
self.size_x -= right
if top:
if top >= self.size_y:
top = self.size_y
self.field = self.field[top:]
self.size_y -= top
if bottom:
if bottom >= self.size_y:
bottom = self.size_y
self.field = self.field[:-bottom]
self.size_y -= bottom
def paste(self, panela, x1, y1, extend=False, transparence=False):
# FIXME:
# negative x1, y1
# x1,y1 > size_x, size_y
if extend:
x_extend = 0
y_extend = 0
if x1 + panela.size_x > self.size_x:
x_extend = x1 + panela.size_x - self.size_x
if y1 + panela.size_y > self.size_y:
y_extend = y1 + panela.size_y - self.size_y
self.extend(cols=x_extend, rows=y_extend)
for i in range(y1, min(self.size_y, y1 + panela.size_y)):
for j in range(x1, min(self.size_x, x1 + panela.size_x)):
if transparence:
if (
panela.field[i - y1][j - x1].char
and panela.field[i - y1][j - x1].char != " "
):
if panela.field[i - y1][j - x1].foreground:
self.field[i][j].foreground = panela.field[i - y1][
j - x1
].foreground
if panela.field[i - y1][j - x1].background:
self.field[i][j].background = panela.field[i - y1][
j - x1
].background
self.field[i][j].char = panela.field[i - y1][j - x1].char
else:
self.field[i][j] = panela.field[i - y1][j - x1]
def strip(self):
def left_spaces(line):
answer = 0
for elem in line:
if not elem.char:
answer += 1
else:
break
return answer
def right_spaces(line):
return left_spaces(line[::-1])
def empty_line(line):
return left_spaces(line) == len(line)
left_space = []
right_space = []
for line in self.field:
left_space.append(left_spaces(line))
right_space.append(right_spaces(line))
left = min(left_space)
right = min(right_space)
top = 0
while top < self.size_y and empty_line(self.field[top]):
top += 1
bottom = 0
while bottom < self.size_y and empty_line(self.field[-(bottom + 1)]):
bottom += 1
self.crop(left=left, right=right, top=top, bottom=bottom)
#
# Drawing and painting
#
def put_point(self, col, row, char=None, color=None, background=None):
if not self.in_field(col, row):
return
if isinstance(char, Point):
self.field[row][col] = char
elif char is None:
if background:
self.field[row][col].background = background
if color:
self.field[row][col].foreground = color
else:
self.field[row][col] = Point(
char=char, foreground=color, background=background
)
def put_string(self, col, row, s=None, color=None, background=None):
for i, c in enumerate(s):
self.put_point(col + i, row, c, color=color, background=background)
def put_line(self, x1, y1, x2, y2, char=None, color=None, background=None):
def get_line(start, end):
# Setup initial conditions
x1, y1 = start
x2, y2 = end
dx = x2 - x1
dy = y2 - y1
# Determine how steep the line is
is_steep = abs(dy) > abs(dx)
# Rotate line
if is_steep:
x1, y1 = y1, x1
x2, y2 = y2, x2
# Swap start and end points if necessary and store swap state
swapped = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
swapped = True
# Recalculate differentials
dx = x2 - x1
dy = y2 - y1
# Calculate error
error = int(dx / 2.0)
ystep = 1 if y1 < y2 else -1
# Iterate over bounding box generating points between start and end
y = y1
points = []
for x in range(x1, x2 + 1):
coord = (y, x) if is_steep else (x, y)
points.append(coord)
error -= abs(dy)
if error < 0:
y += ystep
error += dx
# Reverse the list if the coordinates were swapped
if swapped:
points.reverse()
return points
if color and not isinstance(color, basestring):
color_iter = itertools.cycle(color)
else:
color_iter = itertools.repeat(color)
if background and not isinstance(background, basestring):
background_iter = itertools.cycle(background)
else:
background_iter = itertools.repeat(background)
if char:
char_iter = itertools.cycle(char)
else:
char_iter = itertools.repeat(char)
for x, y in get_line((x1, y1), (x2, y2)):
char = next(char_iter)
color = next(color_iter)
background = next(background_iter)
self.put_point(x, y, char=char, color=color, background=background)
def paint(
self, x1, y1, x2, y2, c1, c2=None, bg1=None, bg2=None, angle=None, angle_bg=None
):
def calculate_color(i, j):
if angle == None:
a = 0
else:
a = angle
r1, g1, b1 = rgb_from_str(c1)
r2, g2, b2 = rgb_from_str(c2)
k = 1.0 * (j - x1) / (x2 - x1) * (1 - a)
l = 1.0 * (i - y1) / (y2 - y1) * a
r3, g3, b3 = (
int(r1 + 1.0 * (r2 - r1) * (k + l)),
int(g1 + 1.0 * (g2 - g1) * (k + l)),
int(b1 + 1.0 * (b2 - b1) * (k + l)),
)
return "#%02x%02x%02x" % (r3, g3, b3)
def calculate_bg(i, j):
if angle_bg == None:
a = 0
else:
a = angle
r1, g1, b1 = rgb_from_str(bg1)
r2, g2, b2 = rgb_from_str(bg2)
k = 1.0 * (j - x1) / (x2 - x1) * (1 - a)
l = 1.0 * (i - y1) / (y2 - y1) * a
r3, g3, b3 = (
int(r1 + 1.0 * (r2 - r1) * (k + l)),
int(g1 + 1.0 * (g2 - g1) * (k + l)),
int(b1 + 1.0 * (b2 - b1) * (k + l)),
)
return "#%02x%02x%02x" % (r3, g3, b3)
if c2 == None:
for i in range(y1, y2):
for j in range(x1, x2):
self.field[i][j].foreground = c1
if bg1:
if bg2:
self.field[i][j].background = calculate_bg(i, j)
else:
self.field[i][j].background = bg1
else:
for i in range(y1, y2):
for j in range(x1, x2):
self.field[i][j].foreground = calculate_color(i, j)
if bg1:
if bg2:
self.field[i][j].background = calculate_bg(i, j)
else:
self.field[i][j].background = bg1
return self
def put_rectangle(
self, x1, y1, x2, y2, char=None, frame=None, color=None, background=None
):
frame_chars = {
"ascii": "++++-|",
"single": "┌┐└┘─│",
"double": "┌┐└┘─│",
}
if frame in frame_chars:
chars = frame_chars[frame]
else:
chars = char * 6
for x in range(x1, x2):
self.put_point(x, y1, char=chars[4], color=color, background=background)
self.put_point(x, y2, char=chars[4], color=color, background=background)
for y in range(y1, y2):
self.put_point(x1, y, char=chars[5], color=color, background=background)
self.put_point(x2, y, char=chars[5], color=color, background=background)
self.put_point(x1, y1, char=chars[0], color=color, background=background)
self.put_point(x2, y1, char=chars[1], color=color, background=background)
self.put_point(x1, y2, char=chars[2], color=color, background=background)
self.put_point(x2, y2, char=chars[3], color=color, background=background)
def put_circle(self, x0, y0, radius, char=None, color=None, background=None):
def k(x):
return int(x * 1.9)
f = 1 - radius
ddf_x = 1
ddf_y = -2 * radius
x = 0
y = radius
self.put_point(x0, y0 + radius, char=char, color=color, background=background)
self.put_point(x0, y0 - radius, char=char, color=color, background=background)
self.put_point(
x0 + k(radius), y0, char=char, color=color, background=background
)
self.put_point(
x0 - k(radius), y0, char=char, color=color, background=background
)
char = "x"
while x < y:
if f >= 0:
y -= 1
ddf_y += 2
f += ddf_y
x += 1
ddf_x += 2
f += ddf_x
self.put_point(
x0 + k(x), y0 + y, char=char, color=color, background=background
)
self.put_point(
x0 - k(x), y0 + y, char=char, color=color, background=background
)
self.put_point(
x0 + k(x), y0 - y, char=char, color=color, background=background
)
self.put_point(
x0 - k(x), y0 - y, char=char, color=color, background=background
)
self.put_point(
x0 + k(y), y0 + x, char=char, color=color, background=background
)
self.put_point(
x0 - k(y), y0 + x, char=char, color=color, background=background
)
self.put_point(
x0 + k(y), y0 - x, char=char, color=color, background=background
)
self.put_point(
x0 - k(y), y0 - x, char=char, color=color, background=background
)
def read_ansi(self, seq, x=0, y=0, transparence=True):
screen = pyte.screens.Screen(self.size_x, self.size_y + 1)
stream = pyte.streams.ByteStream()
stream.attach(screen)
stream.feed(seq.replace("\n", "\r\n"))
for i, line in sorted(screen.buffer.items(), key=lambda x: x[0]):
for j, char in sorted(line.items(), key=lambda x: x[0]):
if j >= self.size_x:
break
self.field[i][j] = Point(
char.data, color_mapping(char.fg), color_mapping(char.bg)
)
def __str__(self):
answer = ""
skip_next = False
for i, line in enumerate(self.field):
for j, c in enumerate(line):
fg_ansi = ""
bg_ansi = ""
stop = ""
if self.field[i][j].foreground:
fg_ansi = "\033[38;2;%s;%s;%sm" % rgb_from_str(
self.field[i][j].foreground
)
stop = colored.attr("reset")
if self.field[i][j].background:
bg_ansi = "\033[48;2;%s;%s;%sm" % rgb_from_str(
self.field[i][j].background
)
stop = colored.attr("reset")
char = c.char or " "
if not skip_next:
answer += fg_ansi + bg_ansi + char.encode("utf-8") + stop
skip_next = wcswidth(char) == 2
# answer += "...\n"
answer += "\n"
return answer
########################################################################################################
class Template(object):
def __init__(self):
self._mode = "page"
self.page = []
self.mask = []
self.code = []
self.panela = None
self._colors = {
"A": "#00cc00",
"B": "#00cc00",
"C": "#00aacc",
"D": "#888888",
"E": "#cccc00",
"F": "#ff0000",
"H": "#22aa22",
"I": "#cc0000",
"J": "#000000",
}
self._bg_colors = {
"G": "#555555",
"J": "#555555",
}
def _process_line(self, line):
if line == "mask":
self._mode = "mask"
if line == "":
self._mode = "code"
def read(self, filename):
with open(filename) as f:
self._mode = "page"
for line in f.readlines():
line = line.rstrip("\n")
if line.startswith("==[") and line.endswith("]=="):
self._process_line(line[3:-3].strip())
continue
if self._mode == "page":
self.page.append(line)
elif self._mode == "mask":
self.mask.append(line)
elif self._mode == "code":
self.mask.append(line)
def apply_mask(self):
lines = self.page
x_size = max([len(x) for x in lines])
y_size = len(lines)
self.panela = Panela(x=x_size, y=y_size)
self.panela.read_ansi("".join("%s\n" % x for x in self.page))
for i, line in enumerate(self.mask):
for j, char in enumerate(line):
if char in self._colors or char in self._bg_colors:
color = self._colors.get(char)
bg_color = self._bg_colors.get(char)
self.panela.put_point(j, i, color=color, background=bg_color)
def show(self):
if self.panela:
return str(self.panela)
return self.page
def main():
pagepath = os.path.join(MYDIR, "share/firstpage-v2.pnl")
template = Template()
template.read(pagepath)
template.apply_mask()
sys.stdout.write(template.show())
if __name__ == "__main__":
main() | --- +++ @@ -37,6 +37,9 @@
class Point(object):
+ """
+ One point (character) on a terminal
+ """
def __init__(self, char=None, foreground=None, background=None):
self.foreground = foreground
@@ -45,6 +48,40 @@
class Panela:
+ """
+ To implement:
+
+ Blocks manipulation:
+
+ [*] copy
+ [*] crop
+ [*] cut
+ [*] extend
+ [ ] join
+ [ ] move
+ [*] paste
+ [*] strip
+
+ Colors manipulation:
+
+ [*] paint foreground/background
+ [*] paint_line
+ [ ] paint_svg
+ [ ] fill background
+ [ ] fill_line
+ [ ] fill_svg
+ [ ] trans
+
+ Drawing:
+
+ [*] put_point
+ [*] put_line
+ [*] put_circle
+ [*] put_rectangle
+
+ Printing and reading:
+ ansi reads vt100 sequence
+ """
def __init__(self, x=80, y=25, panela=None, field=None):
@@ -101,6 +138,7 @@ return Panela(field=field)
def cut(self, x1, y1, x2, y2):
+ """ """
if x1 < 0:
x1 += self.size_x
if x2 < 0:
@@ -124,6 +162,10 @@ return copied
def extend(self, cols=None, rows=None):
+ """
+ Add [cols] columns on the right
+ and [rows] rows at the bottom
+ """
if cols and cols > 0:
self.field = [x + [Point() for _ in range(cols)] for x in self.field]
self.size_x += cols
@@ -135,6 +177,11 @@ self.size_y += rows
def crop(self, left=None, right=None, top=None, bottom=None):
+ """
+ Crop panela.
+ Remove <left>, <right> columns from left or right,
+ and <top> and <bottom> rows from top and bottom.
+ """
if left:
if left >= self.size_x:
@@ -161,6 +208,11 @@ self.size_y -= bottom
def paste(self, panela, x1, y1, extend=False, transparence=False):
+ """
+ Paste <panela> starting at <x1>, <y1>.
+ If <extend> is True, the current panela space is automatically extended.
+ If <transparence> is True, <panela> is overlaid and the characters behind its empty cells stay visible.
+ """
# FIXME:
# negative x1, y1
@@ -195,6 +247,9 @@ self.field[i][j] = panela.field[i - y1][j - x1]
def strip(self):
+ """
+ Strip panela: remove empty spaces around panels rectangle
+ """
def left_spaces(line):
answer = 0
@@ -234,6 +289,10 @@ #
def put_point(self, col, row, char=None, color=None, background=None):
+ """
+ Put a character with foreground and background color on the field.
+ Char can be a Point or a character.
+ """
if not self.in_field(col, row):
return
@@ -250,12 +309,33 @@ )
def put_string(self, col, row, s=None, color=None, background=None):
+ """
+ Put string <s> with foreground color <color> and background color <background>
+ at <col>, <row>
+ """
for i, c in enumerate(s):
self.put_point(col + i, row, c, color=color, background=background)
def put_line(self, x1, y1, x2, y2, char=None, color=None, background=None):
+ """
+ Draw line (x1, y1) - (x2, y2) with foreground color <color>, background color <background>
+ and character <char>, if specified.
+ """
def get_line(start, end):
+ """Bresenham's Line Algorithm
+ Produces a list of tuples from start and end
+
+ Source: http://www.roguebasin.com/index.php?title=Bresenham%27s_Line_Algorithm#Python
+
+ >>> points1 = get_line((0, 0), (3, 4))
+ >>> points2 = get_line((3, 4), (0, 0))
+ >>> assert(set(points1) == set(points2))
+ >>> print(points1)
+ [(0, 0), (1, 1), (1, 2), (2, 3), (3, 4)]
+ >>> print(points2)
+ [(3, 4), (2, 3), (1, 2), (1, 1), (0, 0)]
+ """
# Setup initial conditions
x1, y1 = start
x2, y2 = end
@@ -326,6 +406,10 @@ def paint(
self, x1, y1, x2, y2, c1, c2=None, bg1=None, bg2=None, angle=None, angle_bg=None
):
+ """
+ Paint rectangle (x1,y1) (x2,y2) with foreground color c1 and background bg1 if specified.
+ If colors c2/bg2 are specified, the rectangle is painted with a linear gradient (inclined at <angle>).
+ """
def calculate_color(i, j):
if angle == None:
@@ -387,6 +471,9 @@ def put_rectangle(
self, x1, y1, x2, y2, char=None, frame=None, color=None, background=None
):
+ """
+ Draw rectangle (x1,y1), (x2,y2) using <char> character, <color> and <background> color
+ """
frame_chars = {
"ascii": "++++-|",
@@ -412,6 +499,10 @@ self.put_point(x2, y2, char=chars[3], color=color, background=background)
def put_circle(self, x0, y0, radius, char=None, color=None, background=None):
+ """
+ Draw circle with center at (x0, y0) and radius <radius>
+ using <char> character, <color> and <background> color
+ """
def k(x):
return int(x * 1.9)
@@ -465,6 +556,10 @@ )
def read_ansi(self, seq, x=0, y=0, transparence=True):
+ """
+ Read ANSI sequence and render it to the panela starting from x and y.
+ If transparence is True, replace spaces with ""
+ """
screen = pyte.screens.Screen(self.size_x, self.size_y + 1)
stream = pyte.streams.ByteStream()
@@ -546,6 +641,9 @@ self._mode = "code"
def read(self, filename):
+ """
+ Read template from `filename`
+ """
with open(filename) as f:
self._mode = "page"
for line in f.readlines():
@@ -586,6 +684,7 @@
def main():
+ "Only for experiments"
pagepath = os.path.join(MYDIR, "share/firstpage-v2.pnl")
template = Template()
@@ -595,4 +694,4 @@
if __name__ == "__main__":
- main()+ main()
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/panela/panela_colors.py |
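The Bresenham helper inside put_line() can be exercised on its own; this is a trimmed standalone copy, re-checking the values from the doctest in the added docstring:

def get_line(start, end):
    x1, y1 = start
    x2, y2 = end
    dx, dy = x2 - x1, y2 - y1
    is_steep = abs(dy) > abs(dx)
    if is_steep:                        # rotate the line so it is x-major
        x1, y1, x2, y2 = y1, x1, y2, x2
    swapped = x1 > x2
    if swapped:                         # always walk left to right
        x1, x2, y1, y2 = x2, x1, y2, y1
    dx, dy = x2 - x1, y2 - y1
    error = int(dx / 2.0)
    ystep = 1 if y1 < y2 else -1
    y, points = y1, []
    for x in range(x1, x2 + 1):
        points.append((y, x) if is_steep else (x, y))
        error -= abs(dy)
        if error < 0:
            y += ystep
            error += dx
    if swapped:
        points.reverse()
    return points

print(get_line((0, 0), (3, 4)))  # [(0, 0), (1, 1), (1, 2), (2, 3), (3, 4)]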
Write docstrings for this repository |
# pylint: disable=relative-import
from __future__ import print_function
import os
import re
from subprocess import Popen, PIPE
from polyglot.detect import Detector
from polyglot.detect.base import UnknownLanguage
from config import CONFIG
from languages_data import SO_NAME
from .upstream import UpstreamAdapter
NOT_FOUND_MESSAGE = """404 NOT FOUND
Unknown cheat sheet. Please try to reformulate your query.
Query format:
/LANG/QUESTION
Examples:
/python/read+json
/golang/run+external+program
/js/regex+search
See /:help for more info.
If the problem persists, file a GitHub issue at
github.com/chubin/cheat.sh or ping @igor_chubin
"""
class Question(UpstreamAdapter):
_adapter_name = "question"
_output_format = "text+code"
_cache_needed = True
def _get_page(self, topic, request_options=None):
if not os.path.exists(CONFIG["path.internal.bin.upstream"]):
# if the upstream program is not found, use normal upstream adapter
self._output_format = "ansi"
return UpstreamAdapter._get_page(
self, topic, request_options=request_options
)
topic = topic.replace("+", " ")
# if there is a language name in the section name,
# cut it off (de:python => python)
if "/" in topic:
section_name, topic = topic.split("/", 1)
if ":" in section_name:
_, section_name = section_name.split(":", 1)
section_name = SO_NAME.get(section_name, section_name)
topic = "%s/%s" % (section_name, topic)
# some clients send queries with - instead of + so we have to rewrite them to spaces
topic = re.sub(r"(?<!-)-", " ", topic)
topic_words = topic.split()
topic = " ".join(topic_words)
lang = "en"
try:
query_text = topic # " ".join(topic)
query_text = re.sub("^[^/]*/+", "", query_text.rstrip("/"))
query_text = re.sub("/[0-9]+$", "", query_text)
query_text = re.sub("/[0-9]+$", "", query_text)
detector = Detector(query_text)
supposed_lang = detector.languages[0].code
if len(topic_words) > 2 or supposed_lang in [
"az",
"ru",
"uk",
"de",
"fr",
"es",
"it",
"nl",
]:
lang = supposed_lang
if supposed_lang.startswith("zh_") or supposed_lang == "zh":
lang = "zh"
elif supposed_lang.startswith("pt_"):
lang = "pt"
if supposed_lang in ["ja", "ko"]:
lang = supposed_lang
except UnknownLanguage:
print("Unknown language (%s)" % query_text)
if lang != "en":
topic = ["--human-language", lang, topic]
else:
topic = [topic]
cmd = [CONFIG["path.internal.bin.upstream"]] + topic
proc = Popen(cmd, stdin=open(os.devnull, "r"), stdout=PIPE, stderr=PIPE)
answer = proc.communicate()[0].decode("utf-8")
if not answer:
return NOT_FOUND_MESSAGE
return answer
def get_list(self, prefix=None):
return []
def is_found(self, topic):
return True | --- +++ @@ -1,3 +1,8 @@+"""
+Configuration parameters:
+
+ path.internal.bin.upstream
+"""
# pylint: disable=relative-import
@@ -35,12 +40,24 @@
class Question(UpstreamAdapter):
+ """
+ Answer to a programming language question, using Stackoverflow
+ as the main data source. Heavy lifting is done by an external
+ program `CONFIG["path.internal.bin.upstream"]`.
+
+ If the program is not found, fall back to the superclass `UpstreamAdapter`,
+ which queries the upstream server (by default https://cheat.sh/)
+ for the answer
+ """
_adapter_name = "question"
_output_format = "text+code"
_cache_needed = True
def _get_page(self, topic, request_options=None):
+ """
+ Find answer for the `topic` question.
+ """
if not os.path.exists(CONFIG["path.internal.bin.upstream"]):
# if the upstream program is not found, use normal upstream adapter
@@ -114,4 +131,4 @@ return []
def is_found(self, topic):
- return True+ return True
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/adapter/question.py |
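A small sketch of the section-name normalisation done in Question._get_page(); the SO_NAME table below is a hypothetical one-entry subset of the real mapping in languages_data:

SO_NAME = {"js": "javascript"}  # hypothetical subset

def normalise(topic):
    if "/" in topic:
        section_name, rest = topic.split("/", 1)
        if ":" in section_name:  # drop a language prefix such as "de:"
            _, section_name = section_name.split(":", 1)
        section_name = SO_NAME.get(section_name, section_name)
        topic = "%s/%s" % (section_name, rest)
    return topic

print(normalise("de:js/read json"))  # javascript/read json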
Add concise docstrings to each method |
import random
import re
from typing import Any, Dict, List
import cache
import adapter.cheat_sheets
import adapter.cmd
import adapter.internal
import adapter.latenz
import adapter.learnxiny
import adapter.question
import adapter.rosetta
from config import CONFIG
class Router(object):
def __init__(self):
self._cached_topics_list = []
self._cached_topic_type = {}
adapter_class = adapter.all_adapters(as_dict=True)
active_adapters = set(CONFIG["adapters.active"] + CONFIG["adapters.mandatory"])
self._adapter = {
"internal": adapter.internal.InternalPages(
get_topic_type=self.get_topic_type, get_topics_list=self.get_topics_list
),
"unknown": adapter.internal.UnknownPages(
get_topic_type=self.get_topic_type, get_topics_list=self.get_topics_list
),
}
for by_name in active_adapters:
if by_name not in self._adapter:
self._adapter[by_name] = adapter_class[by_name]()
self._topic_list = {key: obj.get_list() for key, obj in self._adapter.items()}
self.routing_table = CONFIG["routing.main"]
self.routing_table = (
CONFIG["routing.pre"] + self.routing_table + CONFIG["routing.post"]
)
def get_topics_list(self, skip_dirs=False, skip_internal=False):
if self._cached_topics_list:
return self._cached_topics_list
skip = ["fosdem"]
if skip_dirs:
skip.append("cheat.sheets dir")
if skip_internal:
skip.append("internal")
sources_to_merge = [x for x in self._adapter if x not in skip]
answer = {}
for key in sources_to_merge:
answer.update({name: key for name in self._topic_list[key]})
answer = sorted(set(answer.keys()))
self._cached_topics_list = answer
return answer
def get_topic_type(self, topic: str) -> List[str]:
def __get_topic_type(topic: str) -> List[str]:
result = []
for regexp, route in self.routing_table:
if re.search(regexp, topic):
if route in self._adapter:
if self._adapter[route].is_found(topic):
result.append(route)
else:
result.append(route)
if not result:
return [CONFIG["routing.default"]]
# cut the default route off, if there are more than one route found
if len(result) > 1:
return result[:-1]
return result
if topic not in self._cached_topic_type:
self._cached_topic_type[topic] = __get_topic_type(topic)
return self._cached_topic_type[topic]
def _get_page_dict(self, query, topic_type, request_options=None):
return self._adapter[topic_type].get_page_dict(
query, request_options=request_options
)
def handle_if_random_request(self, topic):
def __select_random_topic(prefix, topic_list):
# Here we remove the special cases
cleaned_topic_list = [
x for x in topic_list if "/" not in x and ":" not in x
]
# Here we still check that cleaned_topic_list is not empty
if not cleaned_topic_list:
return prefix
random_topic = random.choice(cleaned_topic_list)
return prefix + random_topic
if topic.endswith("/:random") or topic.lstrip("/") == ":random":
# We strip the :random part and see if the query is valid by running a get_topics_list()
if topic.lstrip("/") == ":random":
topic = topic.lstrip("/")
prefix = topic[:-7]
topic_list = [
x[len(prefix) :] for x in self.get_topics_list() if x.startswith(prefix)
]
if "" in topic_list:
topic_list.remove("")
if topic_list:
# This is a correctly formatted random query like /cpp/:random, as the topic_list is not empty.
random_topic = __select_random_topic(prefix, topic_list)
return random_topic
else:
# This is a wrongly formatted random query like /xyxyxy/:random as the topic_list is empty
# we just strip the /:random and let the already implemented logic handle it.
wrongly_formatted_random = topic[:-8]
return wrongly_formatted_random
# Here if not a random request, we just forward the topic
return topic
def get_answers(
self, topic: str, request_options: Dict[str, str] = None
) -> List[Dict[str, Any]]:
# if topic specified as <topic_type>:<topic>,
# cut <topic_type> off
topic_type = ""
if re.match("[^/]+:", topic):
topic_type, topic = topic.split(":", 1)
topic = self.handle_if_random_request(topic)
topic_types = self.get_topic_type(topic)
# if topic_type is specified explicitly,
# show pages only of that type
if topic_type and topic_type in topic_types:
topic_types = [topic_type]
# 'question' queries are pretty expensive, that's why they should be handled
# in a special way:
# we do not drop the old style cache entries and try to reuse them if possible
if topic_types == ["question"]:
answer = cache.get("q:" + topic)
if answer:
if isinstance(answer, dict):
return [answer]
return [
{
"topic": topic,
"topic_type": "question",
"answer": answer,
"format": "text+code",
}
]
answer = self._get_page_dict(
topic, topic_types[0], request_options=request_options
)
if answer.get("cache", True):
cache.put("q:" + topic, answer)
return [answer]
# Try to find cacheable queries in the cache.
# If answer was not found in the cache, resolve it in a normal way and save in the cache
answers = []
for topic_type in topic_types:
cache_entry_name = f"{topic_type}:{topic}"
cache_needed = self._adapter[topic_type].is_cache_needed()
if cache_needed:
answer = cache.get(cache_entry_name)
if not isinstance(answer, dict):
answer = None
if answer:
answers.append(answer)
continue
answer = self._get_page_dict(
topic, topic_type, request_options=request_options
)
if isinstance(answer, dict):
if "cache" in answer:
cache_needed = answer["cache"]
if cache_needed and answer:
cache.put(cache_entry_name, answer)
answers.append(answer)
return answers
# pylint: disable=invalid-name
_ROUTER = Router()
get_topics_list = _ROUTER.get_topics_list
get_answers = _ROUTER.get_answers | --- +++ @@ -1,3 +1,11 @@+"""
+Queries routing and caching.
+
+Exports:
+
+ get_topics_list()
+ get_answers()
+"""
import random
import re
@@ -15,6 +23,13 @@
class Router(object):
+ """
+ Implementation of query routing. Routing is based on `routing_table`
+ and the data exported by the adapters (functions `get_list()` and `is_found()`).
+
+ `get_topics_list()` returns available topics (accessible at /:list).
+ `get_answer_dict()` return answer for the query.
+ """
def __init__(self):
@@ -46,6 +61,9 @@ )
def get_topics_list(self, skip_dirs=False, skip_internal=False):
+ """
+ List of topics returned on /:list
+ """
if self._cached_topics_list:
return self._cached_topics_list
@@ -66,6 +84,10 @@ return answer
def get_topic_type(self, topic: str) -> List[str]:
+ """
+ Return list of topic types for `topic`
+ or ["unknown"] if topic can't be determined.
+ """
def __get_topic_type(topic: str) -> List[str]:
result = []
@@ -89,11 +111,20 @@ return self._cached_topic_type[topic]
def _get_page_dict(self, query, topic_type, request_options=None):
+ """
+ Return answer_dict for the `query`.
+ """
return self._adapter[topic_type].get_page_dict(
query, request_options=request_options
)
def handle_if_random_request(self, topic):
+ """
+ Check if the `query` is a :random one;
+ if so, validate it and then randomly select a topic
+ based on the provided prefix.
+ """
def __select_random_topic(prefix, topic_list):
# Here we remove the special cases
@@ -137,6 +168,15 @@ def get_answers(
self, topic: str, request_options: Dict[str, str] = None
) -> List[Dict[str, Any]]:
+ """
+ Find cheat sheets for the topic.
+
+ Args:
+ `topic` (str): the name of the topic of the cheat sheet
+
+ Returns:
+ [answer_dict]: list of answers (dictionaries)
+ """
# if topic specified as <topic_type>:<topic>,
# cut <topic_type> off
@@ -210,4 +250,4 @@ # pylint: disable=invalid-name
_ROUTER = Router()
get_topics_list = _ROUTER.get_topics_list
-get_answers = _ROUTER.get_answers+get_answers = _ROUTER.get_answers
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/routing.py |
Generate helpful docstrings for debugging |
import sys
import os
import re
from subprocess import Popen, PIPE
MYDIR = os.path.abspath(os.path.join(__file__, "..", ".."))
sys.path.append("%s/lib/" % MYDIR)
# pylint: disable=wrong-import-position
from config import CONFIG
from globals import error
from buttons import TWITTER_BUTTON, GITHUB_BUTTON, GITHUB_BUTTON_FOOTER
import frontend.ansi
# temporary having it here, but actually we have the same data
# in the adapter module
GITHUB_REPOSITORY = {
"late.nz": "chubin/late.nz",
"cheat.sheets": "chubin/cheat.sheets",
"cheat.sheets dir": "chubin/cheat.sheets",
"tldr": "tldr-pages/tldr",
"cheat": "chrisallenlane/cheat",
"learnxiny": "adambard/learnxinyminutes-docs",
"internal": "",
"search": "",
"unknown": "",
}
def visualize(answer_data, request_options):
query = answer_data["query"]
answers = answer_data["answers"]
topics_list = answer_data["topics_list"]
editable = len(answers) == 1 and answers[0]["topic_type"] == "cheat.sheets"
repository_button = ""
if len(answers) == 1:
repository_button = _github_button(answers[0]["topic_type"])
result, found = frontend.ansi.visualize(answer_data, request_options)
return (
_render_html(
query, result, editable, repository_button, topics_list, request_options
),
found,
)
def _github_button(topic_type):
full_name = GITHUB_REPOSITORY.get(topic_type, "")
if not full_name:
return ""
short_name = full_name.split("/", 1)[1] # pylint: disable=unused-variable
button = (
"<!-- Place this tag where you want the button to render. -->"
'<a aria-label="Star %(full_name)s on GitHub"'
' data-count-aria-label="# stargazers on GitHub"'
' data-count-api="/repos/%(full_name)s#stargazers_count"'
' data-count-href="/%(full_name)s/stargazers"'
' data-icon="octicon-star"'
' href="https://github.com/%(full_name)s"'
' class="github-button">%(short_name)s</a>'
) % locals()
return button
def _render_html(
query, result, editable, repository_button, topics_list, request_options
):
def _html_wrapper(data):
cmd = [
"bash",
CONFIG["path.internal.ansi2html"],
"--palette=solarized",
"--bg=dark",
]
try:
proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
except FileNotFoundError:
print("ERROR: %s" % cmd)
raise
data = data.encode("utf-8")
stdout, stderr = proc.communicate(data)
if proc.returncode != 0:
error((stdout + stderr).decode("utf-8"))
return stdout.decode("utf-8")
result = result + "\n$"
result = _html_wrapper(result)
title = "<title>cheat.sh/%s</title>" % query
submit_button = (
'<input type="submit" style="position: absolute;'
' left: -9999px; width: 1px; height: 1px;" tabindex="-1" />'
)
topic_list = '<datalist id="topics">%s</datalist>' % (
"\n".join("<option value='%s'></option>" % x for x in topics_list)
)
curl_line = "<span class='pre'>$ curl cheat.sh/</span>"
if query == ":firstpage":
query = ""
form_html = (
'<form action="/" method="GET">'
"%s%s"
"<input"
' type="text" value="%s" name="topic"'
' list="topics" autofocus autocomplete="off"/>'
"%s"
"</form>"
) % (submit_button, curl_line, query, topic_list)
edit_button = ""
if editable:
# It's possible that topic directory starts with omitted underscore
if "/" in query:
query = "_" + query
edit_page_link = (
"https://github.com/chubin/cheat.sheets/edit/master/sheets/" + query
)
edit_button = (
'<pre style="position:absolute;padding-left:40em;overflow:visible;height:0;">'
'[<a href="%s" style="color:cyan">edit</a>]'
"</pre>"
) % edit_page_link
result = re.sub("<pre>", edit_button + form_html + "<pre>", result)
result = re.sub("<head>", "<head>" + title, result)
if not request_options.get("quiet"):
result = result.replace(
"</body>",
TWITTER_BUTTON
+ GITHUB_BUTTON
+ repository_button
+ GITHUB_BUTTON_FOOTER
+ "</body>",
)
return result | --- +++ @@ -1,3 +1,9 @@+"""
+
+Configuration parameters:
+
+ path.internal.ansi2html
+"""
import sys
import os
@@ -73,6 +79,9 @@ ):
def _html_wrapper(data):
+ """
+ Convert ANSI text `data` to HTML
+ """
cmd = [
"bash",
CONFIG["path.internal.ansi2html"],
@@ -138,4 +147,4 @@ + GITHUB_BUTTON_FOOTER
+ "</body>",
)
- return result+ return result
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/frontend/html.py |
Write proper docstrings for these functions |
# pylint: disable=relative-import,abstract-method
import re
import os
from .git_adapter import GitRepositoryAdapter
class Tldr(GitRepositoryAdapter):
_adapter_name = "tldr"
_output_format = "code"
_cache_needed = True
_repository_url = "https://github.com/tldr-pages/tldr"
_cheatsheet_files_prefix = "pages/*/"
_cheatsheet_files_extension = ".md"
@staticmethod
def _format_page(text):
answer = []
skip_empty = False
header = 2
for line in text.splitlines():
if line.strip() == "":
if skip_empty and not header:
continue
if header == 1:
header = 0
if header:
continue
else:
skip_empty = False
if line.startswith("-"):
line = "# " + line[2:]
skip_empty = True
elif line.startswith("> "):
if header == 2:
header = 1
line = "# " + line[2:]
skip_empty = True
elif line.startswith("`") and line.endswith("`"):
line = line[1:-1]
line = re.sub(r"{{(.*?)}}", r"\1", line)
answer.append(line)
return "\n".join(answer)
def _get_page(self, topic, request_options=None):
search_order = ["common", "linux", "osx", "sunos", "windows", "android"]
local_rep = self.local_repository_location()
ext = self._cheatsheet_files_extension
filename = None
for subdir in search_order:
_filename = os.path.join(local_rep, "pages", subdir, "%s%s" % (topic, ext))
if os.path.exists(_filename):
filename = _filename
break
if filename:
answer = self._format_page(open(filename, "r").read())
else:
# though it should not happen
answer = ""
return answer
@classmethod
def get_updates_list(cls, updated_files_list):
answer = []
ext = cls._cheatsheet_files_extension
for entry in updated_files_list:
if entry.endswith(ext):
answer.append(entry.split("/")[-1][: -len(ext)])
return answer | --- +++ @@ -1,3 +1,11 @@+"""
+Adapter for https://github.com/tldr-pages/tldr
+
+Cheatsheets are located in `pages/*/`
+Each cheat sheet is a separate file with extension .md
+
+The pages are formatted with a markdown dialect
+"""
# pylint: disable=relative-import,abstract-method
@@ -8,6 +16,9 @@
class Tldr(GitRepositoryAdapter):
+ """
+ tldr-pages/tldr adapter
+ """
_adapter_name = "tldr"
_output_format = "code"
@@ -18,6 +29,13 @@
@staticmethod
def _format_page(text):
+ """
+ Trivial tldr Markdown implementation.
+
+ * Header goes until the first empty line after > prefixed lines.
+ * code surrounded with `` => code
+ * {{var}} => var
+ """
answer = []
skip_empty = False
@@ -50,6 +68,10 @@ return "\n".join(answer)
def _get_page(self, topic, request_options=None):
+ """
+ Go through pages/{common,linux,osx,sunos,windows}/
+ and as soon as anything is found, format and return it.
+ """
search_order = ["common", "linux", "osx", "sunos", "windows", "android"]
local_rep = self.local_repository_location()
@@ -72,10 +94,14 @@
@classmethod
def get_updates_list(cls, updated_files_list):
+ """
+ If a .md file was updated, invalidate cache
+ entry with the name of this file
+ """
answer = []
ext = cls._cheatsheet_files_extension
for entry in updated_files_list:
if entry.endswith(ext):
answer.append(entry.split("/")[-1][: -len(ext)])
- return answer+ return answer
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/adapter/tldr.py |
Add docstrings to clarify complex logic |
from __future__ import print_function
import sys
import textwrap
try:
import urlparse
except ModuleNotFoundError:
import urllib.parse as urlparse
import config
config.CONFIG["cache.type"] = "none"
import cheat_wrapper
import options
def show_usage():
print(
textwrap.dedent(
"""
Usage:
lib/standalone.py [OPTIONS] QUERY
For OPTIONS see :help
"""
)[1:-1]
)
def parse_cmdline(args):
if not args:
show_usage()
sys.exit(0)
query_string = " ".join(args)
parsed = urlparse.urlparse("https://srv:0/%s" % query_string)
request_options = options.parse_args(
urlparse.parse_qs(parsed.query, keep_blank_values=True)
)
query = parsed.path.lstrip("/")
if not query:
query = ":firstpage"
return query, request_options
def main(args):
query, request_options = parse_cmdline(args)
answer, _ = cheat_wrapper.cheat_wrapper(query, request_options=request_options)
sys.stdout.write(answer)
if __name__ == "__main__":
main(sys.argv[1:]) | --- +++ @@ -1,3 +1,6 @@+"""
+Standalone wrapper for the cheat.sh server.
+"""
from __future__ import print_function
@@ -18,6 +21,9 @@
def show_usage():
+ """
+ Show how to use the program in the standalone mode
+ """
print(
textwrap.dedent(
@@ -33,6 +39,10 @@
def parse_cmdline(args):
+ """
+ Parses command line arguments and returns
+ query and request_options
+ """
if not args:
show_usage()
@@ -52,6 +62,9 @@
def main(args):
+ """
+ standalone wrapper for cheat_wrapper()
+ """
query, request_options = parse_cmdline(args)
answer, _ = cheat_wrapper.cheat_wrapper(query, request_options=request_options)
@@ -59,4 +72,4 @@
if __name__ == "__main__":
- main(sys.argv[1:])+ main(sys.argv[1:])
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/standalone.py |
Annotate my code with docstrings |
# pylint: disable=unused-argument,abstract-method
import os.path
import re
from subprocess import Popen, PIPE
from .adapter import Adapter
def _get_abspath(path):
if path.startswith("/"):
return path
import __main__
return os.path.join(os.path.dirname(os.path.dirname(__main__.__file__)), path)
class CommandAdapter(Adapter):
_command = []
def _get_command(self, topic, request_options=None):
return self._command
def _get_page(self, topic, request_options=None):
cmd = self._get_command(topic, request_options=request_options)
if cmd:
try:
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
answer = proc.communicate()[0].decode("utf-8", "ignore")
except OSError:
return (
'ERROR of the "%s" adapter: please create an issue'
% self._adapter_name
)
return answer
return ""
class Fosdem(CommandAdapter):
_adapter_name = "fosdem"
_output_format = "ansi"
_pages_list = [":fosdem"]
_command = ["sudo", "/usr/local/bin/current-fosdem-slide"]
class Translation(CommandAdapter):
_adapter_name = "translation"
_output_format = "text"
_cache_needed = True
def _get_page(self, topic, request_options=None):
from_, topic = topic.split("/", 1)
to_ = request_options.get("lang", "en")
if "-" in from_:
from_, to_ = from_.split("-", 1)
return [
"/home/igor/cheat.sh/bin/get_translation",
from_,
to_,
topic.replace("+", " "),
]
class AdapterRfc(CommandAdapter):
_adapter_name = "rfc"
_output_format = "text"
_cache_needed = True
_command = ["share/adapters/rfc.sh"]
def _get_command(self, topic, request_options=None):
cmd = self._command[:]
if not cmd[0].startswith("/"):
cmd[0] = _get_abspath(cmd[0])
# cut rfc/ off
if topic.startswith("rfc/"):
topic = topic[4:]
return cmd + [topic]
def _get_list(self, prefix=None):
return list("rfc/%s" % x for x in range(1, 8649))
def is_found(self, topic):
return True
class AdapterOeis(CommandAdapter):
_adapter_name = "oeis"
_output_format = "text+code"
_cache_needed = True
_command = ["share/adapters/oeis.sh"]
@staticmethod
def _get_filetype(topic):
if "/" in topic:
language = topic.split("/")[-1].lower()
return language
return "bash"
def _get_command(self, topic, request_options=None):
cmd = self._command[:]
if not cmd[0].startswith("/"):
cmd[0] = _get_abspath(cmd[0])
# cut oeis/ off
# Replace all non (alphanumeric, '-', ':') chars with Spaces to delimit args to oeis.sh
if topic.startswith("oeis/"):
topic = topic[5:]
suffix = ""
if topic.endswith("/:list"):
suffix = " :list"
topic = topic[:-6]
topic = re.sub("[^a-zA-Z0-9-:]+", " ", topic) + suffix
return cmd + [topic]
def is_found(self, topic):
return True
class AdapterChmod(CommandAdapter):
_adapter_name = "chmod"
_output_format = "text"
_cache_needed = True
_command = ["share/adapters/chmod.sh"]
def _get_command(self, topic, request_options=None):
cmd = self._command[:]
# cut chmod/ off
# remove all non (alphanumeric, '-') chars
if topic.startswith("chmod/"):
topic = topic[6:]
topic = re.sub("[^a-zA-Z0-9-]", "", topic)
return cmd + [topic]
def is_found(self, topic):
return True | --- +++ @@ -1,3 +1,4 @@+""" """
# pylint: disable=unused-argument,abstract-method
@@ -9,6 +10,9 @@
def _get_abspath(path):
+ """Find absolute path of the specified `path`
+ according to the location of the main module
+ """
if path.startswith("/"):
return path
@@ -19,6 +23,7 @@
class CommandAdapter(Adapter):
+ """ """
_command = []
@@ -41,6 +46,21 @@
class Fosdem(CommandAdapter):
+ """
+ Show the output of the `current-fosdem-slide` command,
+ which shows the current slide open in some terminal.
+ This was used during the talk at FOSDEM 2019.
+
+ https://www.youtube.com/watch?v=PmiK0JCdh5A
+
+ `sudo` is used here because the session was running under
+ a different user; to be able to use the command via sudo,
+ the following `/etc/sudoers` entry was added:
+
+ srv ALL=(ALL:ALL) NOPASSWD: /usr/local/bin/current-fosdem-slide
+
+ Here `srv` is the user under which the cheat.sh server was running
+ """
_adapter_name = "fosdem"
_output_format = "ansi"
@@ -49,6 +69,7 @@
class Translation(CommandAdapter):
+ """ """
_adapter_name = "translation"
_output_format = "text"
@@ -69,6 +90,10 @@
class AdapterRfc(CommandAdapter):
+ """
+ Show RFC by its number.
+ Exported as: "/rfc/NUMBER"
+ """
_adapter_name = "rfc"
_output_format = "text"
@@ -94,6 +119,10 @@
class AdapterOeis(CommandAdapter):
+ """
+ Show OEIS by its number.
+ Exported as: "/oeis/NUMBER"
+ """
_adapter_name = "oeis"
_output_format = "text+code"
@@ -131,6 +160,10 @@
class AdapterChmod(CommandAdapter):
+ """
+ Show chmod numeric values and strings
+ Exported as: "/chmod/NUMBER"
+ """
_adapter_name = "chmod"
_output_format = "text"
@@ -149,4 +182,4 @@ return cmd + [topic]
def is_found(self, topic):
- return True+ return True
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/adapter/cmd.py |
Add clean documentation to messy code |
from __future__ import print_function
import sys
import logging
import os
import subprocess
import textwrap
from globals import fatal
import adapter
import cache
from config import CONFIG
def _log(*message):
logging.info(*message)
if len(message) > 1:
message = message[0].rstrip("\n") % tuple(message[1:])
else:
message = message[0].rstrip("\n")
sys.stdout.write(message + "\n")
def _run_cmd(cmd):
shell = isinstance(cmd, str)
process = subprocess.Popen(
cmd, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
output = process.communicate()[0]
return process.returncode, output
def fetch_all(skip_existing=True):
def _fetch_locations(known_location):
for location, adptr in known_location.items():
if location in existing_locations:
continue
cmd = adptr.fetch_command()
if not cmd:
continue
sys.stdout.write("Fetching %s..." % (adptr))
sys.stdout.flush()
try:
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
except OSError:
print("\nERROR: %s" % cmd)
raise
output = process.communicate()[0]
if process.returncode != 0:
sys.stdout.write("\nERROR:\n---\n" + output)
fatal("---\nCould not fetch %s" % adptr)
else:
print("Done")
# Searching for location duplicates for different repositories
known_location = {}
for adptr in adapter.adapter.all_adapters():
location = adptr.local_repository_location()
if not location:
continue
if (
location in known_location
and adptr.repository_url() != known_location[location].repository_url()
):
fatal(
"Duplicate location: %s for %s and %s"
% (location, adptr, known_location[location])
)
known_location[location] = adptr
# Parent directories creation
# target subdirectories will be create during the checkout process,
# but the parent directories should be created explicitly.
# Also we should make sure, that the target directory does not exist
existing_locations = []
for location in known_location:
if os.path.exists(location):
if skip_existing:
existing_locations.append(location)
print("Already exists %s" % (location))
else:
fatal("%s already exists" % location)
parent = os.path.dirname(location)
if os.path.exists(parent):
continue
os.makedirs(parent)
known_location = {
k: v for k, v in known_location.items() if k not in existing_locations
}
_fetch_locations(known_location)
def _update_adapter(adptr):
os.chdir(adptr.local_repository_location())
cmd = adptr.update_command()
if not cmd:
return True
errorcode, output = _run_cmd(cmd)
if errorcode:
_log(
"\nERROR:\n---%s\n" % output.decode("utf-8")
+ "\n---\nCould not update %s" % adptr
)
return False
# Getting current repository state
# This state will be saved after the update procedure is finished
# (all cache entries invalidated)
cmd = adptr.current_state_command()
state = None
if cmd:
errorcode, state = _run_cmd(cmd)
if errorcode:
_log(
"\nERROR:\n---\n"
+ state
+ "\n---\nCould not get repository state: %s" % adptr
)
return False
state = state.strip()
# Getting list of files that were changed
# that will be later converted to the list of the pages to be invalidated
cmd = adptr.get_updates_list_command()
updates = []
if cmd:
errorcode, output = _run_cmd(cmd)
output = output.decode("utf-8")
if errorcode:
_log(
"\nERROR:\n---\n"
+ output
+ "\n---\nCould not get list of pages to be updated: %s" % adptr
)
return False
updates = output.splitlines()
entries = adptr.get_updates_list(updates)
if entries:
_log("%s Entries to be updated: %s", adptr, len(entries))
name = adptr.name()
for entry in entries:
cache_name = name + ":" + entry
_log("+ invalidating %s", cache_name)
cache.delete(cache_name)
if entries:
_log("Done")
adptr.save_state(state)
return True
def update_all():
for adptr in adapter.adapter.all_adapters():
location = adptr.local_repository_location()
if not location:
continue
if not os.path.exists(location):
continue
_update_adapter(adptr)
def update_by_name(name):
pass
def _show_usage():
sys.stdout.write(
textwrap.dedent(
"""
Usage:
python lib/fetch.py [command]
Commands:
update-all -- update all configured repositories
update [name] -- update repository of the adapter `name`
fetch-all -- fetch all configured repositories
"""
)
)
def main(args):
if not args:
_show_usage()
sys.exit(0)
logdir = os.path.dirname(CONFIG["path.log.fetch"])
if not os.path.exists(logdir):
os.makedirs(logdir)
logging.basicConfig(
filename=CONFIG["path.log.fetch"],
level=logging.DEBUG,
format="%(asctime)s %(message)s",
)
if args[0] == "fetch-all":
fetch_all()
elif args[0] == "update":
update_by_name(sys.argv[1])
elif args[0] == "update-all":
update_all()
else:
_show_usage()
sys.exit(0)
if __name__ == "__main__":
main(sys.argv[1:]) | --- +++ @@ -1,3 +1,14 @@+"""
+Repositories fetch and update
+
+This module makes real network and OS interaction,
+and the adapters only say how exactly this interaction
+should be done.
+
+Configuration parameters:
+
+ * path.log.fetch
+"""
from __future__ import print_function
@@ -34,6 +45,9 @@
def fetch_all(skip_existing=True):
+ """
+ Fetch all known repositories mentioned in the adapters
+ """
def _fetch_locations(known_location):
for location, adptr in known_location.items():
@@ -105,6 +119,11 @@
def _update_adapter(adptr):
+ """
+ Update implementation.
+
+ If `adptr` returns no update_command(), it is being ignored.
+ """
os.chdir(adptr.local_repository_location())
cmd = adptr.update_command()
@@ -169,6 +188,11 @@
def update_all():
+ """
+ Update all known repositories, mentioned in the adapters
+ and fetched locally.
+ If repository is not fetched, it is skipped.
+ """
for adptr in adapter.adapter.all_adapters():
location = adptr.local_repository_location()
@@ -181,6 +205,9 @@
def update_by_name(name):
+ """
+ Find adapter by its `name` and update only it.
+ """
pass
@@ -204,6 +231,9 @@
def main(args):
+ """
+ function for the initial repositories fetch and manual repositories updates
+ """
if not args:
_show_usage()
@@ -231,4 +261,4 @@
if __name__ == "__main__":
- main(sys.argv[1:])+ main(sys.argv[1:])
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/fetch.py |
Create docstrings for all classes and functions |
# pylint: disable=relative-import
import os
import glob
from .git_adapter import GitRepositoryAdapter
def _remove_initial_underscore(filename):
if filename.startswith("_"):
filename = filename[1:]
return filename
def _sanitize_dirnames(filename, restore=False):
parts = filename.split("/")
newparts = []
for part in parts[:-1]:
if restore:
newparts.append("_" + part)
continue
if part.startswith("_"):
newparts.append(part[1:])
else:
newparts.append(part)
newparts.append(parts[-1])
return "/".join(newparts)
class CheatSheets(GitRepositoryAdapter):
_adapter_name = "cheat.sheets"
_output_format = "code"
_repository_url = "https://github.com/chubin/cheat.sheets"
_cheatsheet_files_prefix = "sheets/"
def _get_list(self, prefix=None):
hidden_files = ["_info.yaml"]
answer = []
prefix = os.path.join(
self.local_repository_location(), self._cheatsheet_files_prefix
)
for mask in ["*", "*/*"]:
template = os.path.join(prefix, mask)
answer += [
_sanitize_dirnames(f_name[len(prefix) :])
for f_name in glob.glob(template)
if not os.path.isdir(f_name)
and os.path.basename(f_name) not in hidden_files
]
return sorted(answer)
def _get_page(self, topic, request_options=None):
filename = os.path.join(
self.local_repository_location(),
self._cheatsheet_files_prefix,
_sanitize_dirnames(topic, restore=True),
)
if os.path.exists(filename):
answer = self._format_page(open(filename, "r").read())
else:
# though it should not happen
answer = "%s:%s not found" % (str(self.__class__), topic)
return answer
class CheatSheetsDir(CheatSheets):
_adapter_name = "cheat.sheets dir"
_output_format = "text"
def _get_list(self, prefix=None):
template = os.path.join(
self.local_repository_location(), self._cheatsheet_files_prefix, "*"
)
answer = sorted(
[
_remove_initial_underscore(os.path.basename(f_name)) + "/"
for f_name in glob.glob(template)
if os.path.isdir(f_name)
]
)
return answer
def _get_page(self, topic, request_options=None):
template = os.path.join(
self.local_repository_location(),
self._cheatsheet_files_prefix,
topic.rstrip("/"),
"*",
)
answer = sorted([os.path.basename(f_name) for f_name in glob.glob(template)])
return "\n".join(answer) + "\n"
def is_found(self, topic):
return CheatSheets.is_found(self, topic.rstrip("/")) | --- +++ @@ -1,3 +1,8 @@+"""
+Implementation of the adapter for the native cheat.sh cheat sheets repository,
+cheat.sheets. The cheat sheets repository is hierarchically structured: cheat
+sheets covering programming languages are are located in subdirectories.
+"""
# pylint: disable=relative-import
@@ -14,6 +19,11 @@
def _sanitize_dirnames(filename, restore=False):
+ """
+ Remove (or add) leading _ in the directory names in `filename`
+ The `restore` param means that the path name should be restored from the query name,
+ i.e. conversion should be done in the opposite direction
+ """
parts = filename.split("/")
newparts = []
for part in parts[:-1]:
@@ -30,6 +40,9 @@
class CheatSheets(GitRepositoryAdapter):
+ """
+ Adapter for the cheat.sheets cheat sheets.
+ """
_adapter_name = "cheat.sheets"
_output_format = "code"
@@ -37,6 +50,10 @@ _cheatsheet_files_prefix = "sheets/"
def _get_list(self, prefix=None):
+ """
+ Return all files on the first and the second level,
+ excluding directories and hidden files
+ """
hidden_files = ["_info.yaml"]
answer = []
@@ -73,6 +90,14 @@
class CheatSheetsDir(CheatSheets):
+ """
+ Adapter for the cheat sheets directories.
+ Provides pages named according to subdirectories:
+ _dir => dir/
+
+ (currently only _get_list() is used; _get_page is shadowed
+ by the CheatSheets adapter)
+ """
_adapter_name = "cheat.sheets dir"
_output_format = "text"
@@ -94,6 +119,9 @@ return answer
def _get_page(self, topic, request_options=None):
+ """
+ Content of the `topic` dir is the list of the pages in the dir
+ """
template = os.path.join(
self.local_repository_location(),
@@ -106,4 +134,4 @@ return "\n".join(answer) + "\n"
def is_found(self, topic):
- return CheatSheets.is_found(self, topic.rstrip("/"))+ return CheatSheets.is_found(self, topic.rstrip("/"))
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/adapter/cheat_sheets.py |
Add detailed documentation for each class |
# pylint: disable=relative-import
import textwrap
import requests
from config import CONFIG
from .adapter import Adapter
def _are_you_offline():
return textwrap.dedent(
"""
.
Are you offline?
_________________
| | ___________ |o| Though it could be theoretically possible
| | ___________ | | to use cheat.sh fully offline,
| | ___________ | | and for *the programming languages questions* too,
| | ___________ | | this very feature is not yet implemented.
| |_____________| |
| _______ | If you find it useful, please visit
| | | || https://github.com/chubin/issues/140
| DD | | V| and drop a couple of lines to encourage
|____|_______|____| the authors to develop it as soon as possible
.
"""
)
class UpstreamAdapter(Adapter):
_adapter_name = "upstream"
_output_format = "ansi"
_cache_needed = False
def _get_page(self, topic, request_options=None):
options_string = "&".join(
["%s=%s" % (x, y) for (x, y) in request_options.items()]
)
url = (
CONFIG["upstream.url"].rstrip("/")
+ "/"
+ topic.lstrip("/")
+ "?"
+ options_string
)
try:
response = requests.get(url, timeout=CONFIG["upstream.timeout"])
answer = {"cache": False, "answer": response.text}
except requests.exceptions.ConnectionError:
answer = {"cache": False, "answer": _are_you_offline()}
return answer
def _get_list(self, prefix=None):
return [] | --- +++ @@ -1,3 +1,11 @@+"""
+Adapter for an external cheat sheets service (i.e. for cheat.sh)
+
+Configuration parameters:
+
+ upstream.url
+ upstream.timeout
+"""
# pylint: disable=relative-import
@@ -30,6 +38,15 @@
class UpstreamAdapter(Adapter):
+ """
+ Connect to the upstream server `CONFIG["upstream.url"]` and fetch
+ response from it. The response is supposed to have the "ansi" format.
+ If the server does not respond within `CONFIG["upstream.timeout"]` seconds,
+ or if a connection error occurs, the "are you offline" banner is displayed.
+
+ Answers are by default cached; the failure answer is marked with the no-cache
+ property ("cache": False).
+ """
_adapter_name = "upstream"
_output_format = "ansi"
@@ -55,4 +72,4 @@ return answer
def _get_list(self, prefix=None):
- return []+ return []
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/adapter/upstream.py |
Add verbose docstrings with examples |
import os
import sys
import re
import colored
from pygments import highlight as pygments_highlight
from pygments.formatters import (
Terminal256Formatter,
) # pylint: disable=no-name-in-module
# pylint: disable=wrong-import-position
sys.path.append(os.path.abspath(os.path.join(__file__, "..")))
from config import CONFIG
import languages_data # pylint: enable=wrong-import-position
import fmt.internal
import fmt.comments
def visualize(answer_data, request_options):
answers = answer_data["answers"]
return _visualize(
answers, request_options, search_mode=bool(answer_data["keyword"])
)
ANSI_ESCAPE = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]")
def remove_ansi(sometext):
return ANSI_ESCAPE.sub("", sometext)
def _limited_answer(answer):
return (
colored.bg("dark_goldenrod")
+ colored.fg("yellow_1")
+ " "
+ answer
+ " "
+ colored.attr("reset")
+ "\n"
)
def _colorize_ansi_answer(
topic,
answer,
color_style, # pylint: disable=too-many-arguments
highlight_all=True,
highlight_code=False,
unindent_code=False,
language=None,
):
color_style = color_style or "native"
lexer_class = languages_data.LEXER["bash"]
if "/" in topic:
if language is None:
section_name = topic.split("/", 1)[0].lower()
else:
section_name = language
section_name = languages_data.get_lexer_name(section_name)
lexer_class = languages_data.LEXER.get(section_name, lexer_class)
if section_name == "php":
answer = "<?\n%s?>\n" % answer
if highlight_all:
highlight = (
lambda answer: pygments_highlight(
answer, lexer_class(), Terminal256Formatter(style=color_style)
).strip("\n")
+ "\n"
)
else:
highlight = lambda x: x
if highlight_code:
blocks = fmt.comments.code_blocks(
answer, wrap_lines=True, unindent_code=(4 if unindent_code else False)
)
highlighted_blocks = []
for block in blocks:
if block[0] == 1:
this_block = highlight(block[1])
else:
this_block = block[1].strip("\n") + "\n"
highlighted_blocks.append(this_block)
result = "\n".join(highlighted_blocks)
else:
result = highlight(answer).lstrip("\n")
return result
def _visualize(answers, request_options, search_mode=False):
highlight = not bool(request_options and request_options.get("no-terminal"))
color_style = (request_options or {}).get("style", "")
if color_style not in CONFIG["frontend.styles"]:
color_style = ""
# if there is more than one answer,
# show the source of the answer
multiple_answers = len(answers) > 1
found = True
result = ""
for answer_dict in answers:
topic = answer_dict["topic"]
topic_type = answer_dict["topic_type"]
answer = answer_dict["answer"]
found = found and not topic_type == "unknown"
if multiple_answers and topic != "LIMITED":
section_name = f"{topic_type}:{topic}"
if not highlight:
result += f"#[{section_name}]\n"
else:
result += "".join(
[
"\n",
colored.bg("dark_gray"),
colored.attr("res_underlined"),
f" {section_name} ",
colored.attr("res_underlined"),
colored.attr("reset"),
"\n",
]
)
if answer_dict["format"] in ["ansi", "text"]:
result += answer
elif topic == ":firstpage-v1":
result += fmt.internal.colorize_internal_firstpage_v1(answer)
elif topic == "LIMITED":
result += _limited_answer(topic)
else:
result += _colorize_ansi_answer(
topic,
answer,
color_style,
highlight_all=highlight,
highlight_code=(
topic_type == "question"
and not request_options.get("add_comments")
and not request_options.get("remove_text")
),
language=answer_dict.get("filetype"),
)
if request_options.get("no-terminal"):
result = remove_ansi(result)
result = result.strip("\n") + "\n"
return result, found | --- +++ @@ -1,3 +1,26 @@+"""
+ANSI frontend.
+
+Exports:
+ visualize(answer_data, request_options)
+
+Format:
+ answer_data = {
+ 'answers': '...',}
+
+ answers = [answer,...]
+
+ answer = {
+ 'topic': '...',
+ 'topic_type': '...',
+ 'answer': '...',
+ 'format': 'ansi|code|markdown|text...',
+ }
+
+Configuration parameters:
+
+ frontend.styles
+"""
import os
import sys
@@ -19,6 +42,9 @@
def visualize(answer_data, request_options):
+ """
+ Renders `answer_data` as ANSI output.
+ """
answers = answer_data["answers"]
return _visualize(
answers, request_options, search_mode=bool(answer_data["keyword"])
@@ -29,6 +55,9 @@
def remove_ansi(sometext):
+ """
+ Remove ANSI sequences from `sometext` and convert it into plaintext.
+ """
return ANSI_ESCAPE.sub("", sometext)
@@ -155,4 +184,4 @@ result = remove_ansi(result)
result = result.strip("\n") + "\n"
- return result, found+ return result, found
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/frontend/ansi.py |
Include argument descriptions in docstrings |
import re
import ansiwrap
import colored
def format_text(text, config=None, highlighter=None):
return _format_section(text, config=config, highlighter=highlighter)
def _split_into_paragraphs(text):
return re.split("\n\n+", text)
def _colorize(text):
return re.sub(
r"`(.*?)`",
colored.bg("dark_gray")
+ colored.fg("white")
+ " "
+ r"\1"
+ " "
+ colored.attr("reset"),
re.sub(
r"\*\*(.*?)\*\*",
colored.attr("bold") + colored.fg("white") + r"\1" + colored.attr("reset"),
text,
),
)
def _format_section(section_text, config=None, highlighter=None):
answer = ""
# cut code blocks
block_number = 0
while True:
section_text, replacements = re.subn(
"^```.*?^```",
"MULTILINE_BLOCK_%s" % block_number,
section_text,
1,
flags=re.S | re.MULTILINE,
)
block_number += 1
if not replacements:
break
# cut links
links = []
while True:
regexp = re.compile(r"\[(.*?)\]\((.*?)\)")
match = regexp.search(section_text)
if match:
links.append(match.group(0))
text = match.group(1)
# links are not yet supported
#
text = "\x1b]8;;%s\x1b\\\\%s\x1b]8;;\x1b\\\\" % (
match.group(2),
match.group(1),
)
else:
break
section_text, replacements = regexp.subn(
text, section_text, 1 # 'LINK_%s' % len(links),
)
block_number += 1
if not replacements:
break
for paragraph in _split_into_paragraphs(section_text):
answer += (
"\n".join(
ansiwrap.fill(_colorize(line)) + "\n" for line in paragraph.splitlines()
)
+ "\n"
)
return {"ansi": answer, "links": links} | --- +++ @@ -1,3 +1,11 @@+"""
+Markdown support.
+
+Exports:
+ format_text(text, config=None, highlighter=None):
+
+Uses external pygments formatters for highlighting (passed as an argument).
+"""
import re
import ansiwrap
@@ -5,6 +13,11 @@
def format_text(text, config=None, highlighter=None):
+ """
+ Renders `text` according to markdown rules.
+ Uses `highlighter` for syntax highlighting.
+ Returns a dictionary with "output" and "links".
+ """
return _format_section(text, config=config, highlighter=highlighter)
@@ -79,4 +92,4 @@ + "\n"
)
- return {"ansi": answer, "links": links}+ return {"ansi": answer, "links": links}
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/fmt/markdown.py |
Write docstrings describing each step |
import re
from colorama import Fore, Back, Style
import colored
PALETTES = {
0: {
1: Fore.WHITE,
2: Style.DIM,
},
1: {
1: Fore.CYAN,
2: Fore.GREEN,
3: colored.fg("orange_3"),
4: Style.DIM,
5: Style.DIM,
},
2: {
1: Fore.RED,
2: Style.DIM,
},
}
def _reverse_palette(code):
return {1: Fore.BLACK + _back_color(code), 2: Style.DIM}
def _back_color(code):
if code == 0 or (isinstance(code, str) and code.lower() == "white"):
return Back.WHITE
if code == 1 or (isinstance(code, str) and code.lower() == "cyan"):
return Back.CYAN
if code == 2 or (isinstance(code, str) and code.lower() == "red"):
return Back.RED
return Back.WHITE
def colorize_internal(text, palette_number=1):
palette = PALETTES[palette_number]
palette_reverse = _reverse_palette(palette_number)
def _process_text(text):
text = text.group()[1:-1]
factor = 1
if text.startswith("-"):
text = text[1:]
factor = -1
stripped = text.lstrip("0123456789")
return (text, stripped, factor)
def _extract_color_number(text, stripped, factor=1):
return int(text[: len(text) - len(stripped)]) * factor
def _colorize_curlies_block(text):
text, stripped, factor = _process_text(text)
color_number = _extract_color_number(text, stripped, factor)
if stripped.startswith("="):
stripped = stripped[1:]
reverse = color_number < 0
if reverse:
color_number = -color_number
if reverse:
stripped = palette_reverse[color_number] + stripped + Style.RESET_ALL
else:
stripped = palette[color_number] + stripped + Style.RESET_ALL
return stripped
def _colorize_headers(text):
if text.group(0).endswith("\n"):
newline = "\n"
else:
newline = ""
color_number = 3
return palette[color_number] + text.group(0).strip() + Style.RESET_ALL + newline
text = re.sub("{.*?}", _colorize_curlies_block, text)
text = re.sub("#(.*?)\n", _colorize_headers, text)
return text
def colorize_internal_firstpage_v1(answer):
def _colorize_line(line):
if line.startswith("T"):
line = colored.fg("grey_62") + line + colored.attr("reset")
line = re.sub(
r"\{(.*?)\}",
colored.fg("orange_3") + r"\1" + colored.fg("grey_35"),
line,
)
return line
line = re.sub(
r"\[(F.*?)\]",
colored.bg("black") + colored.fg("cyan") + r"[\1]" + colored.attr("reset"),
line,
)
line = re.sub(
r"\[(g.*?)\]",
colored.bg("dark_gray")
+ colored.fg("grey_0")
+ r"[\1]"
+ colored.attr("reset"),
line,
)
line = re.sub(
r"\{(.*?)\}", colored.fg("orange_3") + r"\1" + colored.attr("reset"), line
)
line = re.sub(
r"<(.*?)>", colored.fg("cyan") + r"\1" + colored.attr("reset"), line
)
return line
lines = answer.splitlines()
answer_lines = lines[:9]
answer_lines.append(colored.fg("grey_35") + lines[9] + colored.attr("reset"))
for line in lines[10:]:
answer_lines.append(_colorize_line(line))
answer = "\n".join(answer_lines) + "\n"
return answer | --- +++ @@ -1,3 +1,7 @@+"""
+Colorize internal cheat sheets.
+Will be merged with panela later.
+"""
import re
@@ -39,6 +43,9 @@
def colorize_internal(text, palette_number=1):
+ """
+ Colorize `text`, use `palette`
+ """
palette = PALETTES[palette_number]
palette_reverse = _reverse_palette(palette_number)
@@ -88,6 +95,10 @@
def colorize_internal_firstpage_v1(answer):
+ """
+ Colorize "/:firstpage-v1".
+ Legacy.
+ """
def _colorize_line(line):
if line.startswith("T"):
@@ -127,4 +138,4 @@ answer_lines.append(_colorize_line(line))
answer = "\n".join(answer_lines) + "\n"
- return answer+ return answer
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/fmt/internal.py |
Write docstrings that follow conventions |
import pygments.lexers
LEXER = {
"assembly": pygments.lexers.NasmLexer,
"awk": pygments.lexers.AwkLexer,
"bash": pygments.lexers.BashLexer,
"basic": pygments.lexers.QBasicLexer,
"bf": pygments.lexers.BrainfuckLexer,
"chapel": pygments.lexers.ChapelLexer,
"clojure": pygments.lexers.ClojureLexer,
"coffee": pygments.lexers.CoffeeScriptLexer,
"cpp": pygments.lexers.CppLexer,
"c": pygments.lexers.CLexer,
"csharp": pygments.lexers.CSharpLexer,
"d": pygments.lexers.DLexer,
"dart": pygments.lexers.DartLexer,
"delphi": pygments.lexers.DelphiLexer,
"elisp": pygments.lexers.EmacsLispLexer,
"elixir": pygments.lexers.ElixirLexer,
"elm": pygments.lexers.ElmLexer,
"erlang": pygments.lexers.ErlangLexer,
"factor": pygments.lexers.FactorLexer,
"forth": pygments.lexers.ForthLexer,
"fortran": pygments.lexers.FortranLexer,
"fsharp": pygments.lexers.FSharpLexer,
"git": pygments.lexers.BashLexer,
"go": pygments.lexers.GoLexer,
"groovy": pygments.lexers.GroovyLexer,
"haskell": pygments.lexers.HaskellLexer,
"java": pygments.lexers.JavaLexer,
"js": pygments.lexers.JavascriptLexer,
"julia": pygments.lexers.JuliaLexer,
"kotlin": pygments.lexers.KotlinLexer,
"latex": pygments.lexers.TexLexer,
"lisp": pygments.lexers.CommonLispLexer,
"lua": pygments.lexers.LuaLexer,
"mathematica": pygments.lexers.MathematicaLexer,
"matlab": pygments.lexers.MatlabLexer,
"mongo": pygments.lexers.JavascriptLexer,
"nim": pygments.lexers.NimrodLexer,
"objective-c": pygments.lexers.ObjectiveCppLexer,
"ocaml": pygments.lexers.OcamlLexer,
"octave": pygments.lexers.OctaveLexer,
"perl": pygments.lexers.PerlLexer,
"perl6": pygments.lexers.Perl6Lexer,
"php": pygments.lexers.PhpLexer,
"psql": pygments.lexers.PostgresLexer,
"python": pygments.lexers.PythonLexer,
"python3": pygments.lexers.Python3Lexer,
"r": pygments.lexers.SLexer,
"racket": pygments.lexers.RacketLexer,
"ruby": pygments.lexers.RubyLexer,
"rust": pygments.lexers.RustLexer,
"solidity": pygments.lexers.JavascriptLexer,
"scala": pygments.lexers.ScalaLexer,
"scheme": pygments.lexers.SchemeLexer,
"psql": pygments.lexers.SqlLexer,
"sql": pygments.lexers.SqlLexer,
"swift": pygments.lexers.SwiftLexer,
"tcl": pygments.lexers.TclLexer,
"tcsh": pygments.lexers.TcshLexer,
"vb": pygments.lexers.VbNetLexer,
"vbnet": pygments.lexers.VbNetLexer,
"vim": pygments.lexers.VimLexer,
# experimental
"arduino": pygments.lexers.ArduinoLexer,
"pike": pygments.lexers.PikeLexer,
"eiffel": pygments.lexers.EiffelLexer,
"clean": pygments.lexers.CleanLexer,
"dylan": pygments.lexers.DylanLexer,
# not languages
"cmake": pygments.lexers.CMakeLexer,
"django": pygments.lexers.PythonLexer,
"flask": pygments.lexers.PythonLexer,
}
# canonical names are on the right side
LANGUAGE_ALIAS = {
"asm": "assembly",
"assembler": "assembly",
"c++": "cpp",
"c#": "csharp",
"clisp": "lisp",
"coffeescript": "coffee",
"cplusplus": "cpp",
"dlang": "d",
"f#": "fsharp",
"golang": "go",
"javascript": "js",
"objc": "objective-c",
"p6": "perl6",
"sh": "bash",
"visualbasic": "vb",
"vba": "vb",
"wolfram": "mathematica",
"mma": "mathematica",
"wolfram-mathematica": "mathematica",
"m": "octave",
}
VIM_NAME = {
"assembly": "asm",
"bash": "sh",
"coffeescript": "coffee",
"csharp": "cs",
"delphi": "pascal",
"dlang": "d",
"elisp": "newlisp",
"latex": "tex",
"forth": "fs",
"nim": "nimrod",
"perl6": "perl",
"python3": "python",
"python-3.x": "python",
"tcsh": "sh",
"solidity": "js",
"mathematica": "mma",
"wolfram-mathematica": "mma",
"psql": "sql",
# not languages
"cmake": "sh",
"git": "sh",
"django": "python",
"flask": "python",
}
SO_NAME = {
"coffee": "coffeescript",
"js": "javascript",
"python3": "python-3.x",
"vb": "vba",
"mathematica": "wolfram-mathematica",
}
#
# conversion of internal programming language names
# into canonical cheat.sh names
#
ATOM_FT_NAME = {}
EMACS_FT_NAME = {
"asm-mode": "asm",
"awk-mode": "awk",
"sh-mode": "bash",
# basic
"brainfuck-mode": "bf",
# chapel
"clojure-mode": "clojure",
"coffee-mode": "coffee",
"c++-mode": "cpp",
"c-mode": "c",
"csharp-mode": "csharp",
"d-mode": "d",
"dart-mode": "dart",
"dylan-mode": "dylan",
"delphi-mode": "delphi",
"emacs-lisp-mode": "elisp",
# elixir
"elm-mode": "elm",
"erlang-mode": "erlang",
# factor
"forth-mode": "forth",
"fortran-mode": "fortran",
"fsharp-mode": "fsharp",
"go-mode": "go",
"groovy-mode": "groovy",
"haskell-mode": "haskell",
# "hy-mode"
"java-mode": "java",
"js-jsx-mode": "js",
"js-mode": "js",
"js2-jsx-mode": "js",
"js2-mode": "js",
"julia-mode": "julia",
"kotlin-mode": "kotlin",
"lisp-interaction-mode": "lisp",
"lisp-mode": "lisp",
"lua-mode": "lua",
# mathematica
"matlab-mode": "matlab",
# mongo
"objc-mode": "objective-c",
# ocaml
"perl-mode": "perl",
"perl6-mode": "perl6",
"php-mode": "php",
# psql
"python-mode": "python",
# python3
# r -- ess looks it, but I don't know the mode name off hand
"racket-mode": "racket",
"ruby-mode": "ruby",
"rust-mode": "rust",
"solidity-mode": "solidity",
"scala-mode": "scala",
"scheme-mode": "scheme",
"sql-mode": "sql",
"swift-mode": "swift",
"tcl-mode": "tcl",
# tcsh
"visual-basic-mode": "vb",
# vbnet
# vim
}
SUBLIME_FT_NAME = {}
VIM_FT_NAME = {
"asm": "assembler",
"javascript": "js",
"octave": "matlab",
}
VSCODE_FT_NAME = {}
def rewrite_editor_section_name(section_name):
if ":" not in section_name:
return section_name
editor_name, section_name = section_name.split(":", 1)
editor_name_mapping = {
"atom": ATOM_FT_NAME,
"emacs": EMACS_FT_NAME,
"sublime": SUBLIME_FT_NAME,
"vim": VIM_FT_NAME,
"vscode": VSCODE_FT_NAME,
}
if editor_name not in editor_name_mapping:
return section_name
return editor_name_mapping[editor_name].get(section_name, section_name)
def get_lexer_name(section_name):
if ":" in section_name:
section_name = rewrite_editor_section_name(section_name)
return LANGUAGE_ALIAS.get(section_name, section_name)
if __name__ == "__main__":
import doctest
doctest.testmod() | --- +++ @@ -1,3 +1,10 @@+"""
+
+Programming languages information.
+Will be (probably) moved to a separate file/directory
+from the project tree.
+
+"""
import pygments.lexers
@@ -218,6 +225,24 @@
def rewrite_editor_section_name(section_name):
+ """
+ section name can be specified in the form "editor:editor-filetype"
+ and it will be rewritten into the form "filetype"
+ based on the editor filetype names data.
+ If editor name is unknown, it is just cut off: notepad:js => js
+
+ Known editors:
+ * atom
+ * vim
+ * emacs
+ * sublime
+ * vscode
+
+ >>> rewrite_editor_section_name('js')
+ 'js'
+ >>> rewrite_editor_section_name('vscode:js')
+ 'js'
+ """
if ":" not in section_name:
return section_name
@@ -235,6 +260,9 @@
def get_lexer_name(section_name):
+ """
+ Rewrite `section_name` for the further lexer search (for syntax highlighting)
+ """
if ":" in section_name:
section_name = rewrite_editor_section_name(section_name)
return LANGUAGE_ALIAS.get(section_name, section_name)
@@ -243,4 +271,4 @@ if __name__ == "__main__":
import doctest
- doctest.testmod()+ doctest.testmod()
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/languages_data.py |
Document functions with clear intent |
import re
from config import CONFIG
from routing import get_answers, get_topics_list
def _limited_entry():
return {
"topic_type": "LIMITED",
"topic": "LIMITED",
"answer": "LIMITED TO %s ANSWERS" % CONFIG["search.limit"],
"format": "code",
}
def _parse_options(options):
if options is None:
return {}
search_options = {
"insensitive": "i" in options,
"word_boundaries": "b" in options,
"recursive": "r" in options,
}
return search_options
def match(paragraph, keyword, options=None, options_dict=None):
if keyword is None:
return True
if "~" in keyword:
keywords = keyword.split("~")
else:
keywords = [keyword]
if options_dict is None:
options_dict = _parse_options(options)
for kwrd in keywords:
if not kwrd:
continue
regex = re.escape(kwrd)
if options_dict["word_boundaries"]:
regex = r"\b%s\b" % kwrd
if options_dict["insensitive"]:
if not re.search(regex, paragraph, re.IGNORECASE):
return False
else:
if not re.search(regex, paragraph):
return False
return True
def find_answers_by_keyword(directory, keyword, options="", request_options=None):
options_dict = _parse_options(options)
answers_found = []
for topic in get_topics_list(skip_internal=True, skip_dirs=True):
if not topic.startswith(directory):
continue
subtopic = topic[len(directory) :]
if not options_dict["recursive"] and "/" in subtopic:
continue
answer_dicts = get_answers(topic, request_options=request_options)
for answer_dict in answer_dicts:
answer_text = answer_dict.get("answer", "")
# Temporary hotfix:
# In some cases answer_text may be 'bytes' and not 'str'
if type(b"") == type(answer_text):
answer_text = answer_text.decode("utf-8")
if match(answer_text, keyword, options_dict=options_dict):
answers_found.append(answer_dict)
if len(answers_found) > CONFIG["search.limit"]:
answers_found.append(_limited_entry())
break
return answers_found | --- +++ @@ -1,3 +1,23 @@+"""
+Very naive search implementation. Just a placeholder.
+
+Exports:
+
+ find_answer_by_keyword()
+
+It should be implemented on the adapter basis:
+
+ 1. adapter.search(keyword) returns list of matching answers
+ * maybe with some initial weight
+ 2. ranking is done
+ 3. sorted results are returned
+ 4. each page is cut by keyword
+ 5. results are paginated
+
+Configuration parameters:
+
+ search.limit
+"""
import re
@@ -15,6 +35,7 @@
def _parse_options(options):
+ """Parse search options string into optiond_dict"""
if options is None:
return {}
@@ -28,6 +49,13 @@
def match(paragraph, keyword, options=None, options_dict=None):
+ """Search for each keyword from `keywords` in `page`
+ and if all of them are found, return `True`.
+ Otherwise return `False`.
+
+ Several keywords can be joined together using ~
+ For example: ~ssh~passphrase
+ """
if keyword is None:
return True
@@ -58,6 +86,10 @@
def find_answers_by_keyword(directory, keyword, options="", request_options=None):
+ """
+ Search in the whole tree of all cheatsheets or in its subtree `directory`
+ by `keyword`
+ """
options_dict = _parse_options(options)
@@ -86,4 +118,4 @@ answers_found.append(_limited_entry())
break
- return answers_found+ return answers_found
| https://raw.githubusercontent.com/chubin/cheat.sh/HEAD/lib/search.py |