instruction stringclasses 100
values | code stringlengths 78 193k | response stringlengths 259 170k | file stringlengths 59 203 |
|---|---|---|---|
Please document this code using docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from contextlib import nullcontext
from typing import TYPE_CHECKING, Optional
import torch
from transformers.integrations import is_deepspeed_zero3_enabled
from ...extras import logging
if TYPE_CHECKING:
from transformers import PreTrainedModel, PreTrainedTokenizer
logger = logging.get_logger(__name__)
def _noisy_mean_initialization(embed_weight: "torch.Tensor", num_new_tokens: int) -> None:
embedding_dim = embed_weight.size(1)
avg_weight = embed_weight[:-num_new_tokens].mean(dim=0, keepdim=True)
noise_weight = torch.empty_like(embed_weight[-num_new_tokens:])
noise_weight.normal_(mean=0, std=(1.0 / math.sqrt(embedding_dim)))
embed_weight[-num_new_tokens:] = avg_weight + noise_weight
def _description_based_initialization(
    embed_weight: "torch.Tensor",
    num_new_tokens: int,
    descriptions: dict[str, str],
    tokenizer: "PreTrainedTokenizer",
    model: "PreTrainedModel",
    add_noise: bool = False,
) -> None:
    """Initialize the last ``num_new_tokens`` rows of ``embed_weight`` from description text.

    For the i-th description: tokenize the text, drop tokens outside the pre-existing
    vocabulary, average the surviving tokens' input embeddings, and write the result
    (optionally plus Gaussian noise) into row ``-num_new_tokens + i`` in place.

    Args:
        embed_weight: Embedding weight matrix of shape ``[vocab_size, embedding_dim]``.
        num_new_tokens: Number of newly added tokens at the end of the matrix.
        descriptions: Mapping of new token -> description text. NOTE(review): rows are
            assigned by the insertion order of this dict, assumed to match the order in
            which the new tokens were appended to the tokenizer — confirm at call site.
        tokenizer: Tokenizer used to encode the description texts.
        model: Model providing the input-embedding lookup for the description tokens.
        add_noise: If True, add Gaussian noise with std ``1 / sqrt(embedding_dim)``.
    """
    embedding_dim = embed_weight.size(1)
    for i, desc in enumerate(descriptions.values()):
        # Tokenize description text (no BOS/EOS so only content tokens are averaged)
        tokens = tokenizer(desc, return_tensors="pt", add_special_tokens=False)
        with torch.no_grad():
            token_ids = tokens["input_ids"][0]
            # Move to the same device as embed_weight
            device = embed_weight.device
            token_ids = token_ids.to(device)
            # Filter out new tokens (they don't have valid embeddings yet)
            valid_token_ids = token_ids[token_ids < (len(tokenizer) - num_new_tokens)]
            if len(valid_token_ids) == 0:
                # Fallback: use mean of all existing embeddings
                logger.warning_rank0(
                    f"Description for token {i + 1}/{num_new_tokens} contains no valid tokens. "
                    "Using mean of existing embeddings."
                )
                base_embedding = embed_weight[:-num_new_tokens].mean(dim=0)
            else:
                # Get embeddings of description tokens and average them
                token_embeds = model.get_input_embeddings()(valid_token_ids)
                base_embedding = token_embeds.mean(dim=0)

            # Add noise if requested (randn_like keeps base_embedding's device and dtype;
            # the in-place row assignment casts to embed_weight's dtype)
            if add_noise:
                noise = torch.randn_like(base_embedding) * (1.0 / math.sqrt(embedding_dim))
                embed_weight[-num_new_tokens + i] = base_embedding + noise
            else:
                embed_weight[-num_new_tokens + i] = base_embedding
def _initialize_embeddings(
    embed_weight: "torch.Tensor",
    num_new_tokens: int,
    init_method: str,
    new_special_tokens_config: Optional[dict],
    tokenizer: "PreTrainedTokenizer",
    model: "PreTrainedModel",
) -> None:
    """Dispatch to the requested strategy for initializing the new token embeddings.

    Args:
        embed_weight: Embedding weight matrix to initialize in place.
        num_new_tokens: Number of newly added tokens at the end of the matrix.
        init_method: One of ``noise_init``, ``desc_init``, ``desc_init_w_noise``.
        new_special_tokens_config: Token -> description mapping; required by the
            description-based methods, otherwise the call falls back to ``noise_init``.
        tokenizer: Tokenizer forwarded to the description-based initializer.
        model: Model forwarded to the description-based initializer.
    """
    # Map each description-based method to its add_noise flag.
    description_methods = {"desc_init": False, "desc_init_w_noise": True}

    if init_method in description_methods and new_special_tokens_config:
        with_noise = description_methods[init_method]
        if with_noise:
            logger.info_rank0("Using semantic initialization with noise (desc_init_w_noise) for new special tokens")
        else:
            logger.info_rank0("Using semantic initialization (desc_init) for new special tokens")

        _description_based_initialization(
            embed_weight, num_new_tokens, new_special_tokens_config, tokenizer, model, add_noise=with_noise
        )
        return

    # Fallback path: either noise_init was requested, or a description-based method
    # was requested without the required config.
    if init_method != "noise_init":
        logger.warning_rank0(
            f"init_method='{init_method}' requires descriptions config, falling back to 'noise_init'"
        )

    logger.info_rank0("Using noisy mean initialization (noise_init) for new special tokens")
    _noisy_mean_initialization(embed_weight, num_new_tokens)
def resize_embedding_layer(
    model: "PreTrainedModel",
    tokenizer: "PreTrainedTokenizer",
    new_special_tokens_config: Optional[dict] = None,
    init_special_tokens: str = "noise_init",
) -> None:
    """Resize the model's token embeddings to the tokenizer size and initialize new rows.

    Args:
        model: Model whose input (and, if untied, output) embeddings are resized in place.
        tokenizer: Tokenizer providing the target vocabulary size via ``len(tokenizer)``.
        new_special_tokens_config: Optional token -> description mapping used by the
            description-based initialization methods.
        init_special_tokens: One of ``noise_init``, ``desc_init``, ``desc_init_w_noise``.

    Raises:
        ValueError: If the model is quantized, or its output embedding layer is not a
            ``torch.nn.Linear`` (resizing unsupported).

    Note:
        ``resize_token_embeddings`` pads the vocab to a multiple of 64, so the number of
        initialized rows may exceed the number of tokens actually added.
    """
    if is_deepspeed_zero3_enabled():
        import deepspeed  # type: ignore

        # Under ZeRO-3 the weights are sharded; gather them so rank 0 can modify them.
        params = [model.get_input_embeddings().weight]
        if model.get_output_embeddings() is not None and not model.config.tie_word_embeddings:
            params.append(model.get_output_embeddings().weight)

        context_maybe_zero3 = deepspeed.zero.GatheredParameters(params, modifier_rank=0)
    else:
        context_maybe_zero3 = nullcontext()

    with context_maybe_zero3:
        current_embedding_size = model.get_input_embeddings().weight.size(0)

    if len(tokenizer) > current_embedding_size:
        if getattr(model, "quantization_method", None):
            raise ValueError("Cannot resize embedding layers of a quantized model.")

        if not isinstance(model.get_output_embeddings(), torch.nn.Linear):
            raise ValueError("Current model does not support resizing embedding layers.")

        model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64)

        with context_maybe_zero3:
            new_embedding_size = model.get_input_embeddings().weight.size(0)
            num_new_tokens = new_embedding_size - current_embedding_size
            logger.info_rank0(
                f"Resizing embeddings: {current_embedding_size} -> {new_embedding_size} (+{num_new_tokens} tokens)"
            )
            # Initialize input embeddings
            _initialize_embeddings(
                model.get_input_embeddings().weight.data,
                num_new_tokens,
                init_special_tokens,
                new_special_tokens_config,
                tokenizer,
                model,
            )
            # Initialize output embeddings if not tied
            if model.get_output_embeddings() is not None and not model.config.tie_word_embeddings:
                _initialize_embeddings(
                    model.get_output_embeddings().weight.data,
                    num_new_tokens,
                    init_special_tokens,
                    new_special_tokens_config,
                    tokenizer,
                    model,
                )

        model.config.vocab_size = new_embedding_size
logger.info_rank0(f"Resized token embeddings from {current_embedding_size} to {new_embedding_size}.") | --- +++ @@ -30,6 +30,14 @@
def _noisy_mean_initialization(embed_weight: "torch.Tensor", num_new_tokens: int) -> None:
+ """Initialize new token embeddings with mean + Gaussian noise.
+
+ This is the default initialization method used by LlamaFactory.
+
+ Args:
+ embed_weight: The embedding weight matrix to initialize (shape: [vocab_size, embedding_dim])
+ num_new_tokens: Number of new tokens added at the end of the embedding matrix
+ """
embedding_dim = embed_weight.size(1)
avg_weight = embed_weight[:-num_new_tokens].mean(dim=0, keepdim=True)
noise_weight = torch.empty_like(embed_weight[-num_new_tokens:])
@@ -45,6 +53,29 @@ model: "PreTrainedModel",
add_noise: bool = False,
) -> None:
+ """Initialize new token embeddings based on textual descriptions.
+
+ For each new token, this function:
+ 1. Tokenizes its description text
+ 2. Gets embeddings of the description tokens
+ 3. Averages them to initialize the new token's embedding
+ 4. Optionally adds Gaussian noise
+
+ Args:
+ embed_weight: The embedding weight matrix to initialize (shape: [vocab_size, embedding_dim])
+ num_new_tokens: Number of new tokens added
+ descriptions: Dict mapping token string to its description text
+ e.g., {"<think>": "A token representing reasoning process"}
+ tokenizer: The tokenizer instance
+ model: The model instance (used to get input embeddings)
+ add_noise: Whether to add Gaussian noise to the initialization
+
+ Example:
+ descriptions = {
+ "<|START_OF_SVG|>": "Marks the beginning of an SVG document",
+ "<|END_OF_SVG|>": "Marks the end of an SVG document"
+ }
+ """
embedding_dim = embed_weight.size(1)
for i, desc in enumerate(descriptions.values()):
@@ -88,6 +119,18 @@ tokenizer: "PreTrainedTokenizer",
model: "PreTrainedModel",
) -> None:
+ """Single source of truth for embedding initialization.
+
+ This function selects the appropriate initialization method and applies it.
+
+ Args:
+ embed_weight: The embedding weight matrix to initialize
+ num_new_tokens: Number of new tokens added
+ init_method: Initialization method ('noise_init', 'desc_init', 'desc_init_w_noise')
+ new_special_tokens_config: Config dict with token descriptions (required for desc_init methods)
+ tokenizer: The tokenizer instance
+ model: The model instance
+ """
if init_method == "desc_init" and new_special_tokens_config:
logger.info_rank0("Using semantic initialization (desc_init) for new special tokens")
_description_based_initialization(
@@ -113,6 +156,14 @@ new_special_tokens_config: Optional[dict] = None,
init_special_tokens: str = "noise_init",
) -> None:
+ r"""Resize token embeddings and initialize new tokens.
+
+ Args:
+ model: The model to resize
+ tokenizer: The tokenizer (used to get target vocab size)
+ new_special_tokens_config: Optional dict with token descriptions for semantic initialization
+ init_special_tokens: Initialization method ('noise_init', 'desc_init', 'desc_init_w_noise')
+ """
if is_deepspeed_zero3_enabled():
import deepspeed # type: ignore
@@ -164,4 +215,4 @@ )
model.config.vocab_size = new_embedding_size
- logger.info_rank0(f"Resized token embeddings from {current_embedding_size} to {new_embedding_size}.")+ logger.info_rank0(f"Resized token embeddings from {current_embedding_size} to {new_embedding_size}.")
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/model/model_utils/embedding.py |
Add return value explanations in docstrings | # Copyright 2025 HuggingFace Inc., the KVCache.AI team, Approaching AI, and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/examples/pytorch/language-modeling/run_clm.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from dataclasses import asdict, dataclass, field, fields
from typing import Any, Literal, Self
import torch
from omegaconf import OmegaConf
from transformers.training_args import _convert_str_dict
from ..extras.constants import AttentionFunction, EngineName, QuantizationMethod, RopeScaling
from ..extras.logging import get_logger
logger = get_logger(__name__)
@dataclass
class BaseModelArguments:
    r"""Arguments pertaining to the model."""

    model_name_or_path: str | None = field(
        default=None,
        metadata={
            "help": "Path to the model weight or identifier from huggingface.co/models or modelscope.cn/models."
        },
    )
    adapter_name_or_path: str | None = field(
        default=None,
        metadata={
            "help": (
                "Path to the adapter weight or identifier from huggingface.co/models. "
                "Use commas to separate multiple adapters."
            )
        },
    )
    adapter_folder: str | None = field(
        default=None,
        metadata={"help": "The folder containing the adapter weights to load."},
    )
    cache_dir: str | None = field(
        default=None,
        metadata={"help": "Where to store the pre-trained models downloaded from huggingface.co or modelscope.cn."},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether or not to use one of the fast tokenizer (backed by the tokenizers library)."},
    )
    resize_vocab: bool = field(
        default=False,
        metadata={"help": "Whether or not to resize the tokenizer vocab and the embedding layers."},
    )
    split_special_tokens: bool = field(
        default=False,
        metadata={"help": "Whether or not the special tokens should be split during the tokenization process."},
    )
    add_tokens: str | None = field(
        default=None,
        metadata={
            "help": "Non-special tokens to be added into the tokenizer. Use commas to separate multiple tokens."
        },
    )
    add_special_tokens: str | None = field(
        default=None,
        metadata={"help": "Special tokens to be added into the tokenizer. Use commas to separate multiple tokens."},
    )
    new_special_tokens_config: str | None = field(
        default=None,
        metadata={
            "help": (
                "Path to YAML config with special token descriptions for semantic initialization. "
                "If set, this takes precedence over add_special_tokens. "
                "YAML format: {'<token>': 'description text', ...}"
            )
        },
    )
    init_special_tokens: Literal["noise_init", "desc_init", "desc_init_w_noise"] = field(
        default="noise_init",
        metadata={
            "help": (
                "Initialization method for new special tokens: "
                "'noise_init' (default, random noise around mean), "
                "'desc_init' (semantic initialization from descriptions), "
                "'desc_init_w_noise' (semantic + random noise). "
                "Note: 'desc_init' methods require new_special_tokens_config."
            )
        },
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    low_cpu_mem_usage: bool = field(
        default=True,
        metadata={"help": "Whether or not to use memory-efficient model loading."},
    )
    rope_scaling: RopeScaling | None = field(
        default=None,
        metadata={"help": "Which scaling strategy should be adopted for the RoPE embeddings."},
    )
    flash_attn: AttentionFunction = field(
        default=AttentionFunction.AUTO,
        metadata={"help": "Enable FlashAttention for faster training and inference."},
    )
    shift_attn: bool = field(
        default=False,
        metadata={"help": "Enable shift short attention (S^2-Attn) proposed by LongLoRA."},
    )
    mixture_of_depths: Literal["convert", "load"] | None = field(
        default=None,
        metadata={"help": "Convert the model to mixture-of-depths (MoD) or load the MoD model."},
    )
    use_unsloth: bool = field(
        default=False,
        metadata={"help": "Whether or not to use unsloth's optimization for the LoRA training."},
    )
    use_unsloth_gc: bool = field(
        default=False,
        metadata={"help": "Whether or not to use unsloth's gradient checkpointing (no need to install unsloth)."},
    )
    enable_liger_kernel: bool = field(
        default=False,
        metadata={"help": "Whether or not to enable liger kernel for faster training."},
    )
    moe_aux_loss_coef: float | None = field(
        default=None,
        metadata={"help": "Coefficient of the auxiliary router loss in mixture-of-experts model."},
    )
    disable_gradient_checkpointing: bool = field(
        default=False,
        metadata={"help": "Whether or not to disable gradient checkpointing."},
    )
    use_reentrant_gc: bool = field(
        default=True,
        metadata={"help": "Whether or not to use reentrant gradient checkpointing."},
    )
    upcast_layernorm: bool = field(
        default=False,
        metadata={"help": "Whether or not to upcast the layernorm weights in fp32."},
    )
    upcast_lmhead_output: bool = field(
        default=False,
        metadata={"help": "Whether or not to upcast the output of lm_head in fp32."},
    )
    train_from_scratch: bool = field(
        default=False,
        metadata={"help": "Whether or not to randomly initialize the model weights."},
    )
    infer_backend: EngineName = field(
        default=EngineName.HF,
        metadata={"help": "Backend engine used at inference."},
    )
    offload_folder: str = field(
        default="offload",
        metadata={"help": "Path to offload model weights."},
    )
    use_kv_cache: bool = field(
        default=True,
        metadata={"help": "Whether or not to use KV cache in generation."},
    )
    use_v1_kernels: bool | None = field(
        default=False,
        metadata={"help": "Whether or not to use high-performance kernels in training."},
    )
    infer_dtype: Literal["auto", "float16", "bfloat16", "float32"] = field(
        default="auto",
        metadata={"help": "Data type for model weights and activations at inference."},
    )
    hf_hub_token: str | None = field(
        default=None,
        metadata={"help": "Auth token to log in with Hugging Face Hub."},
    )
    ms_hub_token: str | None = field(
        default=None,
        metadata={"help": "Auth token to log in with ModelScope Hub."},
    )
    om_hub_token: str | None = field(
        default=None,
        metadata={"help": "Auth token to log in with Modelers Hub."},
    )
    print_param_status: bool = field(
        default=False,
        metadata={"help": "For debugging purposes, print the status of the parameters in the model."},
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={"help": "Whether to trust the execution of code from datasets/models defined on the Hub or not."},
    )

    def __post_init__(self):
        """Validate required fields, normalize comma-separated lists, and resolve the special-token config."""
        if self.model_name_or_path is None:
            raise ValueError("Please provide `model_name_or_path`.")

        if self.adapter_name_or_path is not None:  # support merging multiple lora weights
            self.adapter_name_or_path = [path.strip() for path in self.adapter_name_or_path.split(",")]

        if self.add_tokens is not None:  # support multiple tokens
            self.add_tokens = [token.strip() for token in self.add_tokens.split(",")]

        # Process special tokens with priority: new_special_tokens_config > add_special_tokens
        if self.new_special_tokens_config is not None:
            # Priority 1: Load from YAML config (extracts both tokens and descriptions)
            try:
                cfg = OmegaConf.load(self.new_special_tokens_config)
                token_descriptions = OmegaConf.to_container(cfg)
                if not isinstance(token_descriptions, dict):
                    raise ValueError(
                        f"YAML config must be a dictionary mapping tokens to descriptions. "
                        f"Got: {type(token_descriptions)}"
                    )

                # Extract token list from config keys
                extracted_tokens = list(token_descriptions.keys())

                # Warn if both are set
                if self.add_special_tokens is not None:
                    logger.warning_rank0(
                        "Both 'new_special_tokens_config' and 'add_special_tokens' are set. "
                        f"Using tokens from config: {extracted_tokens}"
                    )

                # Override add_special_tokens with extracted tokens (as list)
                self.add_special_tokens = extracted_tokens
                # Store descriptions internally for later use (internal attribute)
                self._special_token_descriptions = token_descriptions
                logger.info_rank0(
                    f"Loaded {len(extracted_tokens)} special tokens with descriptions from: "
                    f"{self.new_special_tokens_config}"
                )
            except Exception as e:
                logger.error_rank0(
                    f"Failed to load special tokens config from '{self.new_special_tokens_config}': {e}"
                )
                raise
        elif self.add_special_tokens is not None:
            # Priority 2: Use simple comma-separated string (no descriptions)
            self.add_special_tokens = [token.strip() for token in self.add_special_tokens.split(",")]
            self._special_token_descriptions = None
        else:
            # No special tokens to add
            self._special_token_descriptions = None

        # Validate init method: description-based methods need the descriptions dict
        if self.init_special_tokens in ["desc_init", "desc_init_w_noise"]:
            if self._special_token_descriptions is None:
                logger.warning_rank0(
                    f"init_special_tokens='{self.init_special_tokens}' requires new_special_tokens_config. "
                    "Falling back to 'noise_init'"
                )
                self.init_special_tokens = "noise_init"
@dataclass
class QuantizationArguments:
    r"""Arguments pertaining to the quantization method."""

    quantization_method: QuantizationMethod = field(
        default=QuantizationMethod.BNB,
        metadata={"help": "Quantization method to use for on-the-fly quantization."},
    )
    quantization_bit: int | None = field(
        default=None,
        metadata={"help": "The number of bits to quantize the model using on-the-fly quantization."},
    )
    quantization_type: Literal["fp4", "nf4"] = field(
        default="nf4",
        metadata={"help": "Quantization data type to use in bitsandbytes int4 training."},
    )
    double_quantization: bool = field(
        default=True,
        metadata={"help": "Whether or not to use double quantization in bitsandbytes int4 training."},
    )
    quantization_device_map: Literal["auto"] | None = field(
        default=None,
        metadata={"help": "Device map used to infer the 4-bit quantized model, needs bitsandbytes>=0.43.0."},
    )
@dataclass
class ProcessorArguments:
    r"""Arguments pertaining to the multimodal processor (image/video/audio inputs)."""

    image_max_pixels: int = field(
        default=768 * 768,
        metadata={"help": "The maximum number of pixels of image inputs."},
    )
    image_min_pixels: int = field(
        default=32 * 32,
        metadata={"help": "The minimum number of pixels of image inputs."},
    )
    image_do_pan_and_scan: bool = field(
        default=False,
        metadata={"help": "Use pan and scan to process image for gemma3."},
    )
    crop_to_patches: bool = field(
        default=False,
        metadata={"help": "Whether to crop the image to patches for internvl."},
    )
    video_max_pixels: int = field(
        default=256 * 256,
        metadata={"help": "The maximum number of pixels of video inputs."},
    )
    video_min_pixels: int = field(
        default=16 * 16,
        metadata={"help": "The minimum number of pixels of video inputs."},
    )
    video_fps: float = field(
        default=2.0,
        metadata={"help": "The frames to sample per second for video inputs."},
    )
    video_maxlen: int = field(
        default=128,
        metadata={"help": "The maximum number of sampled frames for video inputs."},
    )
    use_audio_in_video: bool = field(
        default=False,
        metadata={"help": "Whether or not to use audio in video inputs."},
    )
    audio_sampling_rate: int = field(
        default=16000,
        metadata={"help": "The sampling rate of audio inputs."},
    )

    def __post_init__(self):
        """Validate that pixel budgets are consistent (max >= min)."""
        if self.image_max_pixels < self.image_min_pixels:
            raise ValueError("`image_max_pixels` cannot be smaller than `image_min_pixels`.")

        if self.video_max_pixels < self.video_min_pixels:
            raise ValueError("`video_max_pixels` cannot be smaller than `video_min_pixels`.")
@dataclass
class ExportArguments:
    r"""Arguments pertaining to the model export."""

    export_dir: str | None = field(
        default=None,
        metadata={"help": "Path to the directory to save the exported model."},
    )
    export_size: int = field(
        default=5,
        metadata={"help": "The file shard size (in GB) of the exported model."},
    )
    export_device: Literal["cpu", "auto"] = field(
        default="cpu",
        metadata={"help": "The device used in model export, use `auto` to accelerate exporting."},
    )
    export_quantization_bit: int | None = field(
        default=None,
        metadata={"help": "The number of bits to quantize the exported model."},
    )
    export_quantization_dataset: str | None = field(
        default=None,
        metadata={"help": "Path to the dataset or dataset name to use in quantizing the exported model."},
    )
    export_quantization_nsamples: int = field(
        default=128,
        metadata={"help": "The number of samples used for quantization."},
    )
    export_quantization_maxlen: int = field(
        default=1024,
        metadata={"help": "The maximum length of the model inputs used for quantization."},
    )
    export_legacy_format: bool = field(
        default=False,
        metadata={"help": "Whether or not to save the `.bin` files instead of `.safetensors`."},
    )
    export_hub_model_id: str | None = field(
        default=None,
        metadata={"help": "The name of the repository if push the model to the Hugging Face hub."},
    )

    def __post_init__(self):
        """Ensure a calibration dataset is provided when export quantization is requested."""
        if self.export_quantization_bit is not None and self.export_quantization_dataset is None:
            raise ValueError("Quantization dataset is necessary for exporting.")
@dataclass
class VllmArguments:
    r"""Arguments pertaining to the vLLM inference engine."""

    vllm_maxlen: int = field(
        default=4096,
        metadata={"help": "Maximum sequence (prompt + response) length of the vLLM engine."},
    )
    vllm_gpu_util: float = field(
        default=0.7,
        metadata={"help": "The fraction of GPU memory in (0,1) to be used for the vLLM engine."},
    )
    vllm_enforce_eager: bool = field(
        default=False,
        metadata={"help": "Whether or not to disable CUDA graph in the vLLM engine."},
    )
    vllm_max_lora_rank: int = field(
        default=32,
        metadata={"help": "Maximum rank of all LoRAs in the vLLM engine."},
    )
    vllm_config: dict | str | None = field(
        default=None,
        metadata={"help": "Config to initialize the vllm engine. Please use JSON strings."},
    )

    def __post_init__(self):
        """Parse `vllm_config` into a dict when it is given as a JSON object string."""
        if isinstance(self.vllm_config, str) and self.vllm_config.startswith("{"):
            self.vllm_config = _convert_str_dict(json.loads(self.vllm_config))
@dataclass
class SGLangArguments:
    r"""Arguments pertaining to the SGLang inference engine."""

    sglang_maxlen: int = field(
        default=4096,
        metadata={"help": "Maximum sequence (prompt + response) length of the SGLang engine."},
    )
    sglang_mem_fraction: float = field(
        default=0.7,
        metadata={"help": "The memory fraction (0-1) to be used for the SGLang engine."},
    )
    sglang_tp_size: int = field(
        default=-1,
        metadata={"help": "Tensor parallel size for the SGLang engine."},
    )
    sglang_config: dict | str | None = field(
        default=None,
        metadata={"help": "Config to initialize the SGLang engine. Please use JSON strings."},
    )
    sglang_lora_backend: Literal["triton", "flashinfer"] = field(
        default="triton",
        metadata={
            "help": "The backend of running GEMM kernels for Lora modules. Recommend using the Triton LoRA backend for better performance and stability."
        },
    )

    def __post_init__(self):
        """Parse `sglang_config` into a dict when it is given as a JSON object string."""
        if isinstance(self.sglang_config, str) and self.sglang_config.startswith("{"):
            self.sglang_config = _convert_str_dict(json.loads(self.sglang_config))
@dataclass
class KTransformersArguments:
    r"""Arguments pertaining to KTransformers-based training and inference."""

    use_kt: bool = field(
        default=False,
        metadata={"help": "Whether To Use KTransformers Optimizations For LoRA Training."},
    )
    kt_optimize_rule: str | None = field(
        default=None,
        metadata={
            "help": "Path To The KTransformers Optimize Rule; See https://github.com/kvcache-ai/ktransformers/."
        },
    )
    cpu_infer: int | None = field(
        default=32,
        metadata={"help": "Number Of CPU Cores Used For Computation."},
    )
    chunk_size: int | None = field(
        default=8192,
        metadata={"help": "Chunk Size Used For CPU Compute In KTransformers."},
    )
    mode: str | None = field(
        default="normal",
        metadata={"help": "Normal Or Long_Context For Llama Models."},
    )
    kt_maxlen: int = field(
        default=4096,
        metadata={"help": "Maximum Sequence (Prompt + Response) Length Of The KT Engine."},
    )
    kt_use_cuda_graph: bool = field(
        default=True,
        metadata={"help": "Whether To Use CUDA Graphs For The KT Engine."},
    )
    kt_mode: str = field(
        default="normal",
        metadata={"help": "Normal Or Long_Context Mode For The KT Engine."},
    )
    kt_force_think: bool = field(
        default=False,
        metadata={"help": "Force-Think Toggle For The KT Engine."},
    )
@dataclass
class ModelArguments(
    SGLangArguments,
    VllmArguments,
    KTransformersArguments,
    ExportArguments,
    ProcessorArguments,
    QuantizationArguments,
    BaseModelArguments,
):
    r"""Combined arguments for which model/config/tokenizer we are going to fine-tune or infer."""

    # Fields below use init=False: they are derived from other options later, not set by the user.
    compute_dtype: torch.dtype | None = field(
        default=None,
        init=False,
        metadata={"help": "Torch data type for computing model outputs, derived from `fp/bf16`. Do not specify it."},
    )
    device_map: str | dict[str, Any] | None = field(
        default=None,
        init=False,
        metadata={"help": "Device map for model placement, derived from training stage. Do not specify it."},
    )
    model_max_length: int | None = field(
        default=None,
        init=False,
        metadata={"help": "The maximum input length for model, derived from `cutoff_len`. Do not specify it."},
    )
    block_diag_attn: bool = field(
        default=False,
        init=False,
        metadata={"help": "Whether use block diag attention or not, derived from `neat_packing`. Do not specify it."},
    )

    def __post_init__(self):
        # Explicitly invoke each parent's validation; dataclass inheritance does not chain them.
        BaseModelArguments.__post_init__(self)
        ProcessorArguments.__post_init__(self)
        ExportArguments.__post_init__(self)
        VllmArguments.__post_init__(self)
        SGLangArguments.__post_init__(self)

    @classmethod
    def copyfrom(cls, source: "Self", **kwargs) -> "Self":
        """Create a copy of `source`, overriding init fields with `kwargs`.

        Non-init (derived) fields are copied over with setattr after construction.
        """
        init_args, lazy_args = {}, {}
        for attr in fields(source):
            if attr.init:
                init_args[attr.name] = getattr(source, attr.name)
            else:
                lazy_args[attr.name] = getattr(source, attr.name)

        init_args.update(kwargs)
        result = cls(**init_args)
        for name, value in lazy_args.items():
            setattr(result, name, value)

        return result

    def to_dict(self) -> dict[str, Any]:
        """Serialize to a dict, masking the value of any field whose name ends with "token"."""
        args = asdict(self)
        args = {k: f"<{k.upper()}>" if k.endswith("token") else v for k, v in args.items()}
return args | --- +++ @@ -32,6 +32,7 @@
@dataclass
class BaseModelArguments:
+ r"""Arguments pertaining to the model."""
model_name_or_path: str | None = field(
default=None,
@@ -272,6 +273,7 @@
@dataclass
class QuantizationArguments:
+ r"""Arguments pertaining to the quantization method."""
quantization_method: QuantizationMethod = field(
default=QuantizationMethod.BNB,
@@ -297,6 +299,7 @@
@dataclass
class ProcessorArguments:
+ r"""Arguments pertaining to the image processor."""
image_max_pixels: int = field(
default=768 * 768,
@@ -349,6 +352,7 @@
@dataclass
class ExportArguments:
+ r"""Arguments pertaining to the model export."""
export_dir: str | None = field(
default=None,
@@ -394,6 +398,7 @@
@dataclass
class VllmArguments:
+ r"""Arguments pertaining to the vLLM worker."""
vllm_maxlen: int = field(
default=4096,
@@ -423,6 +428,7 @@
@dataclass
class SGLangArguments:
+ r"""Arguments pertaining to the SGLang worker."""
sglang_maxlen: int = field(
default=4096,
@@ -454,6 +460,7 @@
@dataclass
class KTransformersArguments:
+ r"""Arguments pertaining to the KT training."""
use_kt: bool = field(
default=False,
@@ -506,6 +513,10 @@ QuantizationArguments,
BaseModelArguments,
):
+ r"""Arguments pertaining to which model/config/tokenizer we are going to fine-tune or infer.
+
+ The class on the most right will be displayed first.
+ """
compute_dtype: torch.dtype | None = field(
default=None,
@@ -554,4 +565,4 @@ def to_dict(self) -> dict[str, Any]:
args = asdict(self)
args = {k: f"<{k.upper()}>" if k.endswith("token") else v for k, v in args.items()}
- return args+ return args
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/hparams/model_args.py |
Generate docstrings for this script | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import signal
import sys
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Optional
import torch
import transformers
from peft import PeftModel
from transformers import PreTrainedModel, ProcessorMixin, TrainerCallback
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, has_length
from transformers.utils import SAFE_WEIGHTS_NAME, WEIGHTS_NAME
from typing_extensions import override
from ..extras import logging
from ..extras.constants import TRAINER_LOG, V_HEAD_SAFE_WEIGHTS_NAME, V_HEAD_WEIGHTS_NAME
from ..extras.misc import get_peak_memory, is_env_enabled, use_ray
from ..extras.packages import is_safetensors_available
if is_safetensors_available():
from safetensors import safe_open
from safetensors.torch import save_file
if TYPE_CHECKING:
from transformers import TrainerControl, TrainerState, TrainingArguments
from trl import AutoModelForCausalLMWithValueHead
from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments
logger = logging.get_logger(__name__)
def fix_valuehead_checkpoint(
    model: "AutoModelForCausalLMWithValueHead", output_dir: str, safe_serialization: bool
) -> None:
    """Re-save a value-head checkpoint with the decoder and value head split apart.

    The value-head model saves one combined state dict whose keys carry
    ``pretrained_model.`` / ``v_head.`` prefixes. This loads that combined checkpoint
    from ``output_dir``, deletes it, then re-saves the decoder weights via
    ``save_pretrained`` and the value-head weights to a separate file.

    Args:
        model: Value-head model; a no-op unless its ``pretrained_model`` is a
            ``PreTrainedModel`` or ``PeftModel``.
        output_dir: Checkpoint directory containing the combined state dict.
        safe_serialization: If True, read/write safetensors; otherwise torch ``.bin`` files.
    """
    if not isinstance(model.pretrained_model, (PreTrainedModel, PeftModel)):
        return

    if safe_serialization:
        path_to_checkpoint = os.path.join(output_dir, SAFE_WEIGHTS_NAME)
        with safe_open(path_to_checkpoint, framework="pt", device="cpu") as f:
            state_dict: dict[str, torch.Tensor] = {key: f.get_tensor(key).clone() for key in f.keys()}
    else:
        path_to_checkpoint = os.path.join(output_dir, WEIGHTS_NAME)
        state_dict: dict[str, torch.Tensor] = torch.load(path_to_checkpoint, map_location="cpu", weights_only=True)

    # The combined checkpoint is superseded by the split files written below.
    os.remove(path_to_checkpoint)
    decoder_state_dict, v_head_state_dict = {}, {}
    for name, param in state_dict.items():
        if name.startswith("v_head."):
            v_head_state_dict[name] = param
        else:
            # Strip the leading "pretrained_model." prefix so keys match the decoder's.
            decoder_state_dict[name.replace("pretrained_model.", "", 1)] = param

    model.pretrained_model.save_pretrained(
        output_dir, state_dict=decoder_state_dict or None, safe_serialization=safe_serialization
    )

    if safe_serialization:
        save_file(v_head_state_dict, os.path.join(output_dir, V_HEAD_SAFE_WEIGHTS_NAME), metadata={"format": "pt"})
    else:
        torch.save(v_head_state_dict, os.path.join(output_dir, V_HEAD_WEIGHTS_NAME))

    logger.info_rank0(f"Value head model saved at: {output_dir}")
class FixValueHeadModelCallback(TrainerCallback):
    """Callback that splits and re-saves the value-head state dict after each checkpoint save."""

    @override
    def on_save(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
        """Repair the checkpoint written for the current global step."""
        if not args.should_save:
            return

        checkpoint_dir = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
        fix_valuehead_checkpoint(
            model=kwargs.pop("model"),
            output_dir=checkpoint_dir,
            safe_serialization=getattr(args, "save_safetensors", True),
        )
class SaveProcessorCallback(TrainerCallback):
    """Callback that persists the processor next to each checkpoint and at the end of training."""

    def __init__(self, processor: "ProcessorMixin") -> None:
        self.processor = processor

    @override
    def on_save(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
        """Save the processor into the checkpoint folder for the current global step."""
        if not args.should_save:
            return

        checkpoint_dir = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")
        self.processor.save_pretrained(checkpoint_dir)

    @override
    def on_train_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
        """Save the processor into the final output directory."""
        if not args.should_save:
            return

        self.processor.save_pretrained(args.output_dir)
class PissaConvertCallback(TrainerCallback):
@override
def on_train_begin(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
if args.should_save:
model = kwargs.pop("model")
pissa_init_dir = os.path.join(args.output_dir, "pissa_init")
logger.info_rank0(f"Initial PiSSA adapter will be saved at: {pissa_init_dir}.")
if isinstance(model, PeftModel):
init_lora_weights = getattr(model.peft_config["default"], "init_lora_weights")
setattr(model.peft_config["default"], "init_lora_weights", True)
model.save_pretrained(pissa_init_dir, safe_serialization=getattr(args, "save_safetensors", True))
setattr(model.peft_config["default"], "init_lora_weights", init_lora_weights)
@override
def on_train_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
if args.should_save:
model = kwargs.pop("model")
pissa_init_dir = os.path.join(args.output_dir, "pissa_init")
pissa_backup_dir = os.path.join(args.output_dir, "pissa_backup")
pissa_convert_dir = os.path.join(args.output_dir, "pissa_converted")
logger.info_rank0(f"Converted PiSSA adapter will be saved at: {pissa_convert_dir}.")
# 1. save a pissa backup with init_lora_weights: True
# 2. save a converted lora with init_lora_weights: pissa
# 3. load the pissa backup with init_lora_weights: True
# 4. delete the initial adapter and change init_lora_weights to pissa
if isinstance(model, PeftModel):
init_lora_weights = getattr(model.peft_config["default"], "init_lora_weights")
setattr(model.peft_config["default"], "init_lora_weights", True)
model.save_pretrained(pissa_backup_dir, safe_serialization=getattr(args, "save_safetensors", True))
setattr(model.peft_config["default"], "init_lora_weights", init_lora_weights)
model.save_pretrained(
pissa_convert_dir,
safe_serialization=getattr(args, "save_safetensors", True),
path_initial_model_for_weight_conversion=pissa_init_dir,
)
model.load_adapter(pissa_backup_dir, "default", is_trainable=True)
model.set_adapter("default")
setattr(model.peft_config["default"], "init_lora_weights", init_lora_weights)
class LogCallback(TrainerCallback):
def __init__(self) -> None:
# Progress
self.start_time = 0
self.cur_steps = 0
self.max_steps = 0
self.elapsed_time = ""
self.remaining_time = ""
self.thread_pool: Optional[ThreadPoolExecutor] = None
# Status
self.aborted = False
self.do_train = False
# Web UI
self.webui_mode = is_env_enabled("LLAMABOARD_ENABLED")
if self.webui_mode and not use_ray():
signal.signal(signal.SIGABRT, self._set_abort)
self.logger_handler = logging.LoggerHandler(os.getenv("LLAMABOARD_WORKDIR"))
logging.add_handler(self.logger_handler)
transformers.logging.add_handler(self.logger_handler)
def _set_abort(self, signum, frame) -> None:
self.aborted = True
def _reset(self, max_steps: int = 0) -> None:
self.start_time = time.time()
self.cur_steps = 0
self.max_steps = max_steps
self.elapsed_time = ""
self.remaining_time = ""
def _timing(self, cur_steps: int) -> None:
cur_time = time.time()
elapsed_time = cur_time - self.start_time
avg_time_per_step = elapsed_time / cur_steps if cur_steps != 0 else 0
remaining_time = (self.max_steps - cur_steps) * avg_time_per_step
self.cur_steps = cur_steps
self.elapsed_time = str(timedelta(seconds=int(elapsed_time)))
self.remaining_time = str(timedelta(seconds=int(remaining_time)))
def _write_log(self, output_dir: str, logs: dict[str, Any]) -> None:
with open(os.path.join(output_dir, TRAINER_LOG), "a", encoding="utf-8") as f:
f.write(json.dumps(logs) + "\n")
def _create_thread_pool(self, output_dir: str) -> None:
os.makedirs(output_dir, exist_ok=True)
self.thread_pool = ThreadPoolExecutor(max_workers=1)
def _close_thread_pool(self) -> None:
if self.thread_pool is not None:
self.thread_pool.shutdown(wait=True)
self.thread_pool = None
@override
def on_init_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
if (
args.should_save
and os.path.exists(os.path.join(args.output_dir, TRAINER_LOG))
and getattr(args, "overwrite_output_dir", False)
):
logger.warning_rank0_once("Previous trainer log in this folder will be deleted.")
os.remove(os.path.join(args.output_dir, TRAINER_LOG))
@override
def on_train_begin(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
if args.should_save:
self.do_train = True
self._reset(max_steps=state.max_steps)
self._create_thread_pool(output_dir=args.output_dir)
@override
def on_train_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
self._close_thread_pool()
@override
def on_substep_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
if self.aborted:
control.should_epoch_stop = True
control.should_training_stop = True
@override
def on_step_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
if self.aborted:
control.should_epoch_stop = True
control.should_training_stop = True
@override
def on_evaluate(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
if not self.do_train:
self._close_thread_pool()
@override
def on_predict(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
if not self.do_train:
self._close_thread_pool()
@override
def on_log(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
if not args.should_save:
return
self._timing(cur_steps=state.global_step)
logs = dict(
current_steps=self.cur_steps,
total_steps=self.max_steps,
loss=state.log_history[-1].get("loss"),
eval_loss=state.log_history[-1].get("eval_loss"),
predict_loss=state.log_history[-1].get("predict_loss"),
reward=state.log_history[-1].get("reward"),
accuracy=state.log_history[-1].get("rewards/accuracies"),
lr=state.log_history[-1].get("learning_rate"),
epoch=state.log_history[-1].get("epoch"),
percentage=round(self.cur_steps / self.max_steps * 100, 2) if self.max_steps != 0 else 100,
elapsed_time=self.elapsed_time,
remaining_time=self.remaining_time,
)
if state.num_input_tokens_seen:
logs["throughput"] = round(state.num_input_tokens_seen / (time.time() - self.start_time), 2)
logs["total_tokens"] = state.num_input_tokens_seen
if is_env_enabled("RECORD_VRAM"):
vram_allocated, vram_reserved = get_peak_memory()
logs["vram_allocated"] = round(vram_allocated / (1024**3), 2)
logs["vram_reserved"] = round(vram_reserved / (1024**3), 2)
logs = {k: v for k, v in logs.items() if v is not None}
if self.webui_mode and all(key in logs for key in ("loss", "lr", "epoch")):
log_str = f"'loss': {logs['loss']:.4f}, 'learning_rate': {logs['lr']:2.4e}, 'epoch': {logs['epoch']:.2f}"
for extra_key in ("reward", "accuracy", "throughput"):
if logs.get(extra_key):
log_str += f", '{extra_key}': {logs[extra_key]:.2f}"
logger.info_rank0("{" + log_str + "}")
if self.thread_pool is not None:
self.thread_pool.submit(self._write_log, args.output_dir, logs)
@override
def on_prediction_step(
self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs
):
if self.do_train:
return
if self.aborted:
sys.exit(0)
if not args.should_save:
return
eval_dataloader = kwargs.pop("eval_dataloader", None)
if has_length(eval_dataloader):
if self.max_steps == 0:
self._reset(max_steps=len(eval_dataloader))
self._create_thread_pool(output_dir=args.output_dir)
self._timing(cur_steps=self.cur_steps + 1)
if self.cur_steps % 5 == 0 and self.thread_pool is not None:
logs = dict(
current_steps=self.cur_steps,
total_steps=self.max_steps,
percentage=round(self.cur_steps / self.max_steps * 100, 2) if self.max_steps != 0 else 100,
elapsed_time=self.elapsed_time,
remaining_time=self.remaining_time,
)
self.thread_pool.submit(self._write_log, args.output_dir, logs)
class ReporterCallback(TrainerCallback):
def __init__(
self,
model_args: "ModelArguments",
data_args: "DataArguments",
finetuning_args: "FinetuningArguments",
generating_args: "GeneratingArguments",
) -> None:
self.model_args = model_args
self.data_args = data_args
self.finetuning_args = finetuning_args
self.generating_args = generating_args
os.environ["WANDB_PROJECT"] = os.getenv("WANDB_PROJECT", "llamafactory")
@override
def on_train_begin(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
if not state.is_world_process_zero:
return
if "wandb" in args.report_to:
import wandb
wandb.config.update(
{
"model_args": self.model_args.to_dict(),
"data_args": self.data_args.to_dict(),
"finetuning_args": self.finetuning_args.to_dict(),
"generating_args": self.generating_args.to_dict(),
}
)
if "trackio" in args.report_to:
import trackio
trackio.config.update(
{
"model_args": self.model_args.to_dict(),
"data_args": self.data_args.to_dict(),
"finetuning_args": self.finetuning_args.to_dict(),
"generating_args": self.generating_args.to_dict(),
}
)
if self.finetuning_args.use_swanlab:
import swanlab # type: ignore
swanlab.config.update(
{
"model_args": self.model_args.to_dict(),
"data_args": self.data_args.to_dict(),
"finetuning_args": self.finetuning_args.to_dict(),
"generating_args": self.generating_args.to_dict(),
}
) | --- +++ @@ -53,6 +53,17 @@ def fix_valuehead_checkpoint(
model: "AutoModelForCausalLMWithValueHead", output_dir: str, safe_serialization: bool
) -> None:
+ r"""Fix the valuehead checkpoint files.
+
+ The model is already unwrapped.
+
+ There are three cases:
+ 1. full tuning without ds_zero3: state_dict = {"model.layers.*": ..., "v_head.summary.*": ...}
+ 2. lora tuning without ds_zero3: state_dict = {"v_head.summary.*": ...}
+ 3. under deepspeed zero3: state_dict = {"pretrained_model.model.layers.*": ..., "v_head.summary.*": ...}
+
+ We assume `stage3_gather_16bit_weights_on_model_save=true`.
+ """
if not isinstance(model.pretrained_model, (PreTrainedModel, PeftModel)):
return
@@ -85,6 +96,7 @@
class FixValueHeadModelCallback(TrainerCallback):
+ r"""A callback for fixing the checkpoint for valuehead models."""
@override
def on_save(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
@@ -98,6 +110,7 @@
class SaveProcessorCallback(TrainerCallback):
+ r"""A callback for saving the processor."""
def __init__(self, processor: "ProcessorMixin") -> None:
self.processor = processor
@@ -115,6 +128,7 @@
class PissaConvertCallback(TrainerCallback):
+ r"""A callback for converting the PiSSA adapter to a normal one."""
@override
def on_train_begin(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs):
@@ -156,6 +170,7 @@
class LogCallback(TrainerCallback):
+ r"""A callback for logging training and evaluation status."""
def __init__(self) -> None:
# Progress
@@ -324,6 +339,7 @@
class ReporterCallback(TrainerCallback):
+ r"""A callback for reporting training status to external logger."""
def __init__(
self,
@@ -377,4 +393,4 @@ "finetuning_args": self.finetuning_args.to_dict(),
"generating_args": self.generating_args.to_dict(),
}
- )+ )
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/train/callbacks.py |
Generate consistent docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from ..data import Role
from ..extras.constants import CHOICES
@dataclass
class EvalTemplate:
system: str
choice: str
answer: str
def _parse_example(self, example: dict[str, str]) -> tuple[str, str]:
candidates = [self.choice.format(choice=ch, content=example[ch]) for ch in CHOICES if ch in example]
return "".join([example["question"]] + candidates + [self.answer]), example["answer"]
def format_example(
self, target_data: dict[str, str], support_set: list[dict[str, str]], subject_name: str
) -> list[dict[str, str]]:
messages = []
for k in range(len(support_set)):
prompt, response = self._parse_example(support_set[k])
messages.append({"role": Role.USER.value, "content": prompt})
messages.append({"role": Role.ASSISTANT.value, "content": response})
prompt, response = self._parse_example(target_data)
messages.append({"role": Role.USER.value, "content": prompt})
messages.append({"role": Role.ASSISTANT.value, "content": response})
messages[0]["content"] = self.system.format(subject=subject_name) + messages[0]["content"]
return messages
eval_templates: dict[str, "EvalTemplate"] = {}
def _register_eval_template(name: str, system: str, choice: str, answer: str) -> None:
eval_templates[name] = EvalTemplate(system=system, choice=choice, answer=answer)
def get_eval_template(name: str) -> "EvalTemplate":
eval_template = eval_templates.get(name, None)
assert eval_template is not None, f"Template {name} does not exist."
return eval_template
_register_eval_template(
name="en",
system="The following are multiple choice questions (with answers) about {subject}.\n\n",
choice="\n{choice}. {content}",
answer="\nAnswer:",
)
_register_eval_template(
name="zh",
system="以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n",
choice="\n{choice}. {content}",
answer="\n答案:",
) | --- +++ @@ -25,12 +25,18 @@ answer: str
def _parse_example(self, example: dict[str, str]) -> tuple[str, str]:
+ r"""Parse eval example.
+
+ input: a dict with keys {"question", "A", "B", "C", "D", "answer"}
+ output: a tuple of (prompt, response).
+ """
candidates = [self.choice.format(choice=ch, content=example[ch]) for ch in CHOICES if ch in example]
return "".join([example["question"]] + candidates + [self.answer]), example["answer"]
def format_example(
self, target_data: dict[str, str], support_set: list[dict[str, str]], subject_name: str
) -> list[dict[str, str]]:
+ r"""Convert dataset examples to messages."""
messages = []
for k in range(len(support_set)):
prompt, response = self._parse_example(support_set[k])
@@ -70,4 +76,4 @@ system="以下是中国关于{subject}考试的单项选择题,请选出其中的正确答案。\n\n",
choice="\n{choice}. {content}",
answer="\n答案:",
-)+)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/eval/template.py |
Document this module using docstrings | # Copyright 2025 Musab Gultekin and the LlamaFactory team.
#
# This code is based on the Musab Gultekin's functionary library.
# https://github.com/MeetKai/functionary/blob/main/functionary/train/packing/monkey_patch_packing.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# MIT License
#
# Copyright (c) 2023 Musab Gultekin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import TYPE_CHECKING
import torch
import torch.nn.functional as F
from ...extras import logging
if TYPE_CHECKING:
from ...hparams import ModelArguments
logger = logging.get_logger(__name__)
def get_seqlens_in_batch(attention_mask: "torch.Tensor") -> "torch.Tensor":
bsz = attention_mask.size(0)
dtype, device = attention_mask.dtype, attention_mask.device
max_num = torch.max(attention_mask).item()
counts: torch.Tensor = torch.zeros((bsz, max_num), dtype=dtype, device=device)
for i in range(max_num):
counts[:, i] = torch.sum(attention_mask == (i + 1), dim=-1)
counts = counts.flatten()
seqlens = counts[counts.nonzero().squeeze(dim=-1)]
return seqlens
def get_unpad_data(attention_mask: "torch.Tensor") -> tuple["torch.Tensor", "torch.Tensor", int]:
seqlens_in_batch = get_seqlens_in_batch(attention_mask)
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
max_seqlen_in_batch = seqlens_in_batch.max().item()
cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
return indices, cu_seqlens, max_seqlen_in_batch
def configure_packing(model_args: "ModelArguments", is_trainable: bool) -> None:
if not is_trainable or not model_args.block_diag_attn:
return
import transformers.modeling_flash_attention_utils
transformers.modeling_flash_attention_utils._get_unpad_data = get_unpad_data
logger.info_rank0("Using block diagonal attention for sequence packing without cross-attention.") | --- +++ @@ -53,6 +53,19 @@
def get_seqlens_in_batch(attention_mask: "torch.Tensor") -> "torch.Tensor":
+ r"""Get the sequence lengths in the current batch.
+
+ e.g.
+ ```python
+ # input
+ [
+ [1, 1, 2, 2, 2, 0],
+ [1, 2, 2, 3, 3, 3],
+ ]
+ # output
+ [2, 3, 1, 2, 3]
+ ```
+ """
bsz = attention_mask.size(0)
dtype, device = attention_mask.dtype, attention_mask.device
max_num = torch.max(attention_mask).item()
@@ -66,6 +79,27 @@
def get_unpad_data(attention_mask: "torch.Tensor") -> tuple["torch.Tensor", "torch.Tensor", int]:
+ r"""Prepare the indices and seqlens for flash attn varlen function.
+
+ Returns:
+ indices: indices of non-masked tokens from the flattened sequence.
+ cu_seqlens: the cumulative sequence lengths in the current batch, always starts from 0.
+ max_seqlen_in_batch: the largest seqlen in the current batch.
+
+ e.g.
+ ```python
+ # input
+ [
+ [1, 1, 2, 2, 2, 0],
+ [1, 2, 2, 3, 3, 3],
+ ]
+ # output
+ [0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11]
+ [0, 2, 5, 6, 8, 11]
+ 3
+ ```
+
+ """
seqlens_in_batch = get_seqlens_in_batch(attention_mask)
indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
max_seqlen_in_batch = seqlens_in_batch.max().item()
@@ -80,4 +114,4 @@ import transformers.modeling_flash_attention_utils
transformers.modeling_flash_attention_utils._get_unpad_data = get_unpad_data
- logger.info_rank0("Using block diagonal attention for sequence packing without cross-attention.")+ logger.info_rank0("Using block diagonal attention for sequence packing without cross-attention.")
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/model/model_utils/packing.py |
Add standardized docstrings across the file | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fire
import torch
import torch.distributed.checkpoint as dcp
from transformers import AutoModelForCausalLM
def convert(hf_path: str, dcp_path: str) -> None:
if not hf_path or not dcp_path:
raise ValueError("Both 'hf_path' and 'dcp_path' are required.")
print(f"Loading HF model from {hf_path}...")
model = AutoModelForCausalLM.from_pretrained(hf_path, device_map="cpu", torch_dtype=torch.bfloat16)
print(f"Saving to DCP format at {dcp_path}...")
dcp.save(model.state_dict(), checkpoint_id=dcp_path)
print("Done!")
def help() -> None:
print(__doc__)
if __name__ == "__main__":
fire.Fire({"convert": convert, "help": help, "--convert": convert}) | --- +++ @@ -12,6 +12,15 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Convert a HuggingFace model to DCP checkpoint format.
+
+Usage:
+ python scripts/hf2dcp.py convert --hf_path=/path/to/hf --dcp_path=/path/to/dcp
+
+Arguments:
+ hf_path: Path to the HuggingFace model directory.
+ dcp_path: Output path (directory) for DCP checkpoint.
+"""
import fire
import torch
@@ -20,6 +29,12 @@
def convert(hf_path: str, dcp_path: str) -> None:
+ """Convert HF model weights to DCP.
+
+ Args:
+ hf_path: HuggingFace model directory.
+ dcp_path: Output path (directory) for DCP checkpoint.
+ """
if not hf_path or not dcp_path:
raise ValueError("Both 'hf_path' and 'dcp_path' are required.")
@@ -32,8 +47,9 @@
def help() -> None:
+ """Show help message."""
print(__doc__)
if __name__ == "__main__":
- fire.Fire({"convert": convert, "help": help, "--convert": convert})+ fire.Fire({"convert": convert, "help": help, "--convert": convert})
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/scripts/hf2dcp.py |
Generate NumPy-style docstrings | # Copyright 2025 Optuna, HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/utils/logging.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import threading
from concurrent.futures import ThreadPoolExecutor
from functools import lru_cache
from typing import Optional
from .constants import RUNNING_LOG
_thread_lock = threading.RLock()
_default_handler: Optional["logging.Handler"] = None
_default_log_level: "logging._Level" = logging.INFO
class LoggerHandler(logging.Handler):
def __init__(self, output_dir: str) -> None:
super().__init__()
self._formatter = logging.Formatter(
fmt="[%(levelname)s|%(asctime)s] %(filename)s:%(lineno)s >> %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
self.setLevel(logging.INFO)
self.thread_pool = ThreadPoolExecutor(max_workers=1)
os.makedirs(output_dir, exist_ok=True)
self.running_log = os.path.join(output_dir, RUNNING_LOG)
try:
os.remove(self.running_log)
except OSError:
pass
def _write_log(self, log_entry: str) -> None:
with open(self.running_log, "a", encoding="utf-8") as f:
f.write(log_entry + "\n")
def emit(self, record) -> None:
if record.name == "httpx":
return
log_entry = self._formatter.format(record)
self.thread_pool.submit(self._write_log, log_entry)
def close(self) -> None:
self.thread_pool.shutdown(wait=True)
return super().close()
class _Logger(logging.Logger):
def info_rank0(self, *args, **kwargs) -> None:
self.info(*args, **kwargs)
def warning_rank0(self, *args, **kwargs) -> None:
self.warning(*args, **kwargs)
def warning_rank0_once(self, *args, **kwargs) -> None:
self.warning(*args, **kwargs)
def _get_default_logging_level() -> "logging._Level":
env_level_str = os.getenv("LLAMAFACTORY_VERBOSITY", None)
if env_level_str:
if env_level_str.upper() in logging._nameToLevel:
return logging._nameToLevel[env_level_str.upper()]
else:
raise ValueError(f"Unknown logging level: {env_level_str}.")
return _default_log_level
def _get_library_name() -> str:
return __name__.split(".")[0]
def _get_library_root_logger() -> "_Logger":
return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
global _default_handler
with _thread_lock:
if _default_handler: # already configured
return
formatter = logging.Formatter(
fmt="[%(levelname)s|%(asctime)s] %(name)s:%(lineno)s >> %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
_default_handler = logging.StreamHandler(sys.stdout)
_default_handler.setFormatter(formatter)
library_root_logger = _get_library_root_logger()
library_root_logger.addHandler(_default_handler)
library_root_logger.setLevel(_get_default_logging_level())
library_root_logger.propagate = False
def get_logger(name: str | None = None) -> "_Logger":
if name is None:
name = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(name)
def add_handler(handler: "logging.Handler") -> None:
_configure_library_root_logger()
_get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
_configure_library_root_logger()
_get_library_root_logger().removeHandler(handler)
def info_rank0(self: "logging.Logger", *args, **kwargs) -> None:
if int(os.getenv("LOCAL_RANK", "0")) == 0:
self.info(*args, **kwargs)
def warning_rank0(self: "logging.Logger", *args, **kwargs) -> None:
if int(os.getenv("LOCAL_RANK", "0")) == 0:
self.warning(*args, **kwargs)
@lru_cache(None)
def warning_rank0_once(self: "logging.Logger", *args, **kwargs) -> None:
if int(os.getenv("LOCAL_RANK", "0")) == 0:
self.warning(*args, **kwargs)
logging.Logger.info_rank0 = info_rank0
logging.Logger.warning_rank0 = warning_rank0
logging.Logger.warning_rank0_once = warning_rank0_once | --- +++ @@ -32,6 +32,7 @@
class LoggerHandler(logging.Handler):
+ r"""Redirect the logging output to the logging file for LLaMA Board."""
def __init__(self, output_dir: str) -> None:
super().__init__()
@@ -65,6 +66,7 @@
class _Logger(logging.Logger):
+ r"""A logger that supports rank0 logging."""
def info_rank0(self, *args, **kwargs) -> None:
self.info(*args, **kwargs)
@@ -77,6 +79,7 @@
def _get_default_logging_level() -> "logging._Level":
+ r"""Return the default logging level."""
env_level_str = os.getenv("LLAMAFACTORY_VERBOSITY", None)
if env_level_str:
if env_level_str.upper() in logging._nameToLevel:
@@ -96,6 +99,7 @@
def _configure_library_root_logger() -> None:
+ r"""Configure root logger using a stdout stream handler with an explicit format."""
global _default_handler
with _thread_lock:
@@ -115,6 +119,7 @@
def get_logger(name: str | None = None) -> "_Logger":
+ r"""Return a logger with the specified name. It it not supposed to be accessed externally."""
if name is None:
name = _get_library_name()
@@ -123,11 +128,13 @@
def add_handler(handler: "logging.Handler") -> None:
+ r"""Add a handler to the root logger."""
_configure_library_root_logger()
_get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
+ r"""Remove a handler to the root logger."""
_configure_library_root_logger()
_get_library_root_logger().removeHandler(handler)
@@ -150,4 +157,4 @@
logging.Logger.info_rank0 = info_rank0
logging.Logger.warning_rank0 = warning_rank0
-logging.Logger.warning_rank0_once = warning_rank0_once+logging.Logger.warning_rank0_once = warning_rank0_once
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/extras/logging.py |
Add docstrings to make code maintainable | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's Transformers and Optimum library.
# https://github.com/huggingface/transformers/blob/v4.41.0/src/transformers/utils/quantization_config.py
# https://github.com/huggingface/optimum/blob/v1.20.0/optimum/gptq/data.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from typing import TYPE_CHECKING, Any
import torch
from datasets import load_dataset
from transformers import BitsAndBytesConfig, EetqConfig, GPTQConfig, HqqConfig
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.modeling_utils import is_fsdp_enabled
from ...extras import logging
from ...extras.constants import FILEEXT2TYPE, QuantizationMethod
from ...extras.misc import check_version, get_current_device
if TYPE_CHECKING:
from transformers import PretrainedConfig, PreTrainedTokenizer
from ...hparams import ModelArguments
logger = logging.get_logger(__name__)
def _get_quantization_dataset(tokenizer: "PreTrainedTokenizer", model_args: "ModelArguments") -> list[dict[str, Any]]:
if os.path.isfile(model_args.export_quantization_dataset):
data_path = FILEEXT2TYPE.get(model_args.export_quantization_dataset.split(".")[-1], None)
data_files = model_args.export_quantization_dataset
else:
data_path = model_args.export_quantization_dataset
data_files = None
dataset = load_dataset(
path=data_path,
data_files=data_files,
split="train",
cache_dir=model_args.cache_dir,
token=model_args.hf_hub_token,
)
samples = []
maxlen = model_args.export_quantization_maxlen
for _ in range(model_args.export_quantization_nsamples):
n_try = 0
while True:
if n_try > 100:
raise ValueError("Cannot find satisfying example, considering decrease `export_quantization_maxlen`.")
sample_idx = random.randint(0, len(dataset) - 1)
sample: dict[str, torch.Tensor] = tokenizer(dataset[sample_idx]["text"], return_tensors="pt")
n_try += 1
if sample["input_ids"].size(1) > maxlen:
break # TODO: fix large maxlen
word_idx = random.randint(0, sample["input_ids"].size(1) - maxlen - 1)
input_ids = sample["input_ids"][:, word_idx : word_idx + maxlen]
attention_mask = sample["attention_mask"][:, word_idx : word_idx + maxlen]
samples.append({"input_ids": input_ids.tolist(), "attention_mask": attention_mask.tolist()})
return samples
def configure_quantization(
config: "PretrainedConfig",
tokenizer: "PreTrainedTokenizer",
model_args: "ModelArguments",
is_trainable: bool,
init_kwargs: dict[str, Any],
) -> None:
if getattr(config, "quantization_config", None): # ptq
if model_args.quantization_bit is not None:
logger.warning_rank0("`quantization_bit` will not affect on the PTQ-quantized models.")
quantization_config: dict[str, Any] = getattr(config, "quantization_config", None)
quant_method = quantization_config.get("quant_method", "")
if quant_method not in (QuantizationMethod.MXFP4, QuantizationMethod.FP8) and (
is_deepspeed_zero3_enabled() or is_fsdp_enabled()
):
# mxfp4 will dequant the model weights
raise ValueError("DeepSpeed ZeRO-3 or FSDP is incompatible with PTQ-quantized models.")
if quant_method == QuantizationMethod.MXFP4:
from transformers import Mxfp4Config
quant_config = Mxfp4Config(dequantize=True)
init_kwargs["quantization_config"] = quant_config
init_kwargs["ignore_mismatched_sizes"] = True
if quant_method == QuantizationMethod.FP8:
from transformers import FineGrainedFP8Config
quant_config = FineGrainedFP8Config(dequantize=True)
init_kwargs["quantization_config"] = quant_config
init_kwargs["ignore_mismatched_sizes"] = True
if quant_method == QuantizationMethod.GPTQ:
check_version("gptqmodel>=2.0.0", mandatory=True)
quantization_config.pop("disable_exllama", None) # remove deprecated args
quantization_config["use_exllama"] = False # disable exllama
if quant_method == QuantizationMethod.AWQ:
check_version("autoawq", mandatory=True)
if quant_method == QuantizationMethod.AQLM:
check_version("aqlm>=1.1.0", mandatory=True)
quantization_config["bits"] = 2
quant_bits = quantization_config.get("bits", "?")
logger.info_rank0(f"Loading {quant_bits}-bit {quant_method.upper()}-quantized model.")
elif model_args.export_quantization_bit is not None: # gptqmodel
if model_args.export_quantization_bit not in [8, 4, 3, 2]:
raise ValueError("AutoGPTQ only accepts 2/3/4/8-bit quantization.")
check_version("optimum>=1.24.0", mandatory=True)
check_version("gptqmodel>=2.0.0", mandatory=True)
from accelerate.utils import get_max_memory
if getattr(config, "model_type", None) == "chatglm":
raise ValueError("ChatGLM model is not supported yet.")
try:
from optimum.gptq import utils as gq_utils
if "language_model.model.layers" not in gq_utils.BLOCK_PATTERNS:
gq_utils.BLOCK_PATTERNS.insert(0, "language_model.model.layers")
except ImportError:
pass
block_name_to_quantize = None
if getattr(config, "model_type", None) in ["gemma3", "paligemma"]:
block_name_to_quantize = "language_model.model.layers"
init_kwargs["quantization_config"] = GPTQConfig(
bits=model_args.export_quantization_bit,
tokenizer=tokenizer,
dataset=_get_quantization_dataset(tokenizer, model_args),
block_name_to_quantize=block_name_to_quantize,
)
init_kwargs["device_map"] = "auto"
init_kwargs["max_memory"] = get_max_memory()
model_args.compute_dtype = torch.float16 # force fp16 for gptqmodel
logger.info_rank0(f"Quantizing model to {model_args.export_quantization_bit} bit with GPTQModel.")
elif model_args.quantization_bit is not None: # on-the-fly
if model_args.quantization_method == QuantizationMethod.BNB:
if model_args.quantization_bit == 8:
check_version("bitsandbytes>=0.37.0", mandatory=True)
init_kwargs["quantization_config"] = BitsAndBytesConfig(load_in_8bit=True)
elif model_args.quantization_bit == 4:
check_version("bitsandbytes>=0.39.0", mandatory=True)
init_kwargs["quantization_config"] = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=model_args.compute_dtype,
bnb_4bit_use_double_quant=model_args.double_quantization,
bnb_4bit_quant_type=model_args.quantization_type,
bnb_4bit_quant_storage=model_args.compute_dtype, # crucial for fsdp+qlora
)
else:
raise ValueError("Bitsandbytes only accepts 4-bit or 8-bit quantization.")
# Do not assign device map if:
# 1. deepspeed zero3 or fsdp (train)
# 2. auto quantization device map (inference)
if is_deepspeed_zero3_enabled() or is_fsdp_enabled() or model_args.quantization_device_map == "auto":
if model_args.quantization_bit != 4:
raise ValueError("Only 4-bit quantized model can use fsdp+qlora or auto device map.")
check_version("bitsandbytes>=0.43.0", mandatory=True)
else:
init_kwargs["device_map"] = {"": get_current_device()} # change auto device map for inference
logger.info_rank0(f"Quantizing model to {model_args.quantization_bit} bit with bitsandbytes.")
elif model_args.quantization_method == QuantizationMethod.HQQ:
if model_args.quantization_bit not in [8, 6, 5, 4, 3, 2, 1]:
raise ValueError("HQQ only accepts 1/2/3/4/5/6/8-bit quantization.")
if is_deepspeed_zero3_enabled() or is_fsdp_enabled():
raise ValueError("HQQ quantization is incompatible with DeepSpeed ZeRO-3 or FSDP.")
check_version("hqq", mandatory=True)
init_kwargs["quantization_config"] = HqqConfig(
nbits=model_args.quantization_bit, quant_zero=False, quant_scale=False, axis=0
) # use ATEN kernel (axis=0) for performance
logger.info_rank0(f"Quantizing model to {model_args.quantization_bit} bit with HQQ.")
elif model_args.quantization_method == QuantizationMethod.EETQ:
if model_args.quantization_bit != 8:
raise ValueError("EETQ only accepts 8-bit quantization.")
if is_deepspeed_zero3_enabled() or is_fsdp_enabled():
raise ValueError("EETQ quantization is incompatible with DeepSpeed ZeRO-3 or FSDP.")
check_version("eetq", mandatory=True)
init_kwargs["quantization_config"] = EetqConfig()
logger.info_rank0(f"Quantizing model to {model_args.quantization_bit} bit with EETQ.") | --- +++ @@ -41,6 +41,7 @@
def _get_quantization_dataset(tokenizer: "PreTrainedTokenizer", model_args: "ModelArguments") -> list[dict[str, Any]]:
+ r"""Prepare the tokenized dataset to perform AutoGPTQ. Do not use tensor output for JSON serialization."""
if os.path.isfile(model_args.export_quantization_dataset):
data_path = FILEEXT2TYPE.get(model_args.export_quantization_dataset.split(".")[-1], None)
data_files = model_args.export_quantization_dataset
@@ -85,6 +86,7 @@ is_trainable: bool,
init_kwargs: dict[str, Any],
) -> None:
+ r"""Priority: PTQ-quantized (train/infer) > AutoGPTQ (export) > On-the-fly quantization (train/infer)."""
if getattr(config, "quantization_config", None): # ptq
if model_args.quantization_bit is not None:
logger.warning_rank0("`quantization_bit` will not affect on the PTQ-quantized models.")
@@ -211,4 +213,4 @@
check_version("eetq", mandatory=True)
init_kwargs["quantization_config"] = EetqConfig()
- logger.info_rank0(f"Quantizing model to {model_args.quantization_bit} bit with EETQ.")+ logger.info_rank0(f"Quantizing model to {model_args.quantization_bit} bit with EETQ.")
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/model/model_utils/quantization.py |
Add docstrings for production code | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import fire
from peft import PeftModel
from transformers import AutoConfig, AutoModelForTextToWaveform, AutoProcessor
from transformers.utils import cached_file
def merge_lora(
model_path: str,
lora_path: str,
save_path: str = "./merged_model_checkpoint",
extra_file: str = "spk_dict.pt",
submodule_name: str = "thinker",
):
# 1. Load the original model
model = AutoModelForTextToWaveform.from_pretrained(model_path, torch_dtype="auto", device_map="cpu")
print("Successfully loaded the original model.")
# 2. Extract the submodule to be merged (e.g., model.thinker)
if not hasattr(model, submodule_name):
raise AttributeError(f"The model does not have a submodule named '{submodule_name}'.")
base_submodule = getattr(model, submodule_name)
print(f"Successfully extracted submodule: {submodule_name}.")
# 3. Load the LoRA weights onto the extracted submodule
lora_model = PeftModel.from_pretrained(base_submodule, lora_path)
processor = AutoProcessor.from_pretrained(lora_path)
print("Successfully loaded LoRA weights and processor.")
# 4. Merge the LoRA weights into the submodule and unload the LoRA modules
merged_submodule = lora_model.merge_and_unload()
print("Successfully merged LoRA weights.")
# 5. Replace the original submodule with the merged submodule in the model
setattr(model, submodule_name, merged_submodule)
# 6. Save the final merged model along with the tokenizer and processor configuration
model.save_pretrained(save_path)
processor.save_pretrained(save_path)
print(f"Merged model and processor saved to {save_path}.")
try:
source_file = cached_file(path_or_repo_id=model_path, filename=extra_file)
shutil.copy(source_file, os.path.join(save_path, extra_file))
print(f"File '{extra_file}' copied from {model_path} to {save_path}.")
except Exception:
print(f"File '{extra_file}' not found in {model_path}, skipping copy.")
def save_full_model(
model_path: str,
thinker_path: str,
save_path: str = "./merged_model_checkpoint",
extra_file: str = "spk_dict.pt",
):
# 1. Load the saved thinker module and the original model
config = AutoConfig.from_pretrained(model_path)
if getattr(config, "model_type") == "qwen2_5_omni":
from transformers.models.qwen2_5_omni import Qwen2_5OmniThinkerForConditionalGeneration # type: ignore
ThinkerClass = Qwen2_5OmniThinkerForConditionalGeneration
elif getattr(config, "model_type") == "qwen3_omni_moe":
from transformers.models.qwen3_omni_moe import Qwen3OmniMoeThinkerForConditionalGeneration # type: ignore
ThinkerClass = Qwen3OmniMoeThinkerForConditionalGeneration
else:
raise ValueError(f"Unsupported model type: {getattr(config, 'model_type')}.")
thinker = ThinkerClass.from_pretrained(thinker_path, torch_dtype="auto", device_map="cpu")
base_model = AutoModelForTextToWaveform.from_pretrained(model_path, torch_dtype="auto", device_map="cpu")
base_model.thinker = thinker
processor = AutoProcessor.from_pretrained(thinker_path)
print("Successfully loaded model weights and processor.")
# 2. Save the complete model along with its tokenizer and processor configuration
base_model.save_pretrained(save_path)
processor.save_pretrained(save_path)
print(f"Merged model and processor saved to {save_path}.")
# 3. Copy the extra file from the base model directory to the save_path
try:
source_file = cached_file(path_or_repo_id=model_path, filename=extra_file)
shutil.copy(source_file, os.path.join(save_path, extra_file))
print(f"File '{extra_file}' copied from {model_path} to {save_path}.")
except Exception:
print(f"File '{extra_file}' not found in {model_path}, skipping copy.")
if __name__ == "__main__":
fire.Fire({"save_full": save_full_model, "merge_lora": merge_lora}) | --- +++ @@ -12,6 +12,17 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Why we need this script for qwen_omni?
+
+Because the qwen_omni model is constructed by two parts:
+1. [Thinker]:[audio_encoder, vision_encoder, LLM backbone], which our repository does support to post-training.
+2. [Talker]: [audio_decoder, wave_model], which is not supported to post-training without specific tokenizer.
+When we post-training the model, we exactly train the [Thinker] part, and the [Talker] part is dropped.
+So, to get the complete model, we need to merge the [Talker] part back to the [Thinker] part.
+LoRA mode: [Thinker + LoRA weights] + [Original Talker] -> [Omni model]
+Full mode: [Thinker] + [Original Talker] -> [Omni model]
+For Processor, we do saved the processor from trained model instead of the original model.
+"""
import os
import shutil
@@ -29,6 +40,17 @@ extra_file: str = "spk_dict.pt",
submodule_name: str = "thinker",
):
+ """Load the original model, merge the LoRA weights.
+
+ For a specified submodule, and save the final merged model along with its configurations.
+
+ Args:
+ model_path (str): Path to the original model directory.
+ lora_path (str): Path to the directory containing LoRA weights.
+ save_path (str): Directory where the merged model and configurations will be saved.
+ extra_file (str): Name of the extra file to be copied (default: "spk_dict.pt").
+ submodule_name (str): Name of the submodule to merge (default: "thinker").
+ """
# 1. Load the original model
model = AutoModelForTextToWaveform.from_pretrained(model_path, torch_dtype="auto", device_map="cpu")
print("Successfully loaded the original model.")
@@ -71,6 +93,16 @@ save_path: str = "./merged_model_checkpoint",
extra_file: str = "spk_dict.pt",
):
+ """Load the saved thinker module and the original model, replace the thinker in the original model.
+
+ Then save the complete model along with its tokenizer and processor configuration.
+
+ Args:
+ model_path (str): Directory path of the original model.
+ thinker_path (str): Path to the saved thinker weights.
+ save_path (str): Directory where the merged model and configurations will be saved.
+ extra_file (str): Name of the extra file to be copied (default: "spk_dict.pt").
+ """
# 1. Load the saved thinker module and the original model
config = AutoConfig.from_pretrained(model_path)
if getattr(config, "model_type") == "qwen2_5_omni":
@@ -105,4 +137,4 @@
if __name__ == "__main__":
- fire.Fire({"save_full": save_full_model, "merge_lora": merge_lora})+ fire.Fire({"save_full": save_full_model, "merge_lora": merge_lora})
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/scripts/qwen_omni_merge.py |
Write docstrings describing functionality | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from collections.abc import AsyncGenerator
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Literal, Optional, Union
if TYPE_CHECKING:
from transformers import PreTrainedModel, PreTrainedTokenizer
from vllm import AsyncLLMEngine
from ..data import Template
from ..data.mm_plugin import AudioInput, ImageInput, VideoInput
from ..extras.constants import EngineName
from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments
@dataclass
class Response:
response_text: str
response_length: int
prompt_length: int
finish_reason: Literal["stop", "length"]
class BaseEngine(ABC):
name: "EngineName"
model: Union["PreTrainedModel", "AsyncLLMEngine"]
tokenizer: "PreTrainedTokenizer"
can_generate: bool
template: "Template"
generating_args: dict[str, Any]
@abstractmethod
def __init__(
self,
model_args: "ModelArguments",
data_args: "DataArguments",
finetuning_args: "FinetuningArguments",
generating_args: "GeneratingArguments",
) -> None:
...
@abstractmethod
async def chat(
self,
messages: list[dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
images: Optional[list["ImageInput"]] = None,
videos: Optional[list["VideoInput"]] = None,
audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> list["Response"]:
...
@abstractmethod
async def stream_chat(
self,
messages: list[dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
images: Optional[list["ImageInput"]] = None,
videos: Optional[list["VideoInput"]] = None,
audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> AsyncGenerator[str, None]:
...
@abstractmethod
async def get_scores(
self,
batch_input: list[str],
**input_kwargs,
) -> list[float]:
... | --- +++ @@ -37,6 +37,10 @@
class BaseEngine(ABC):
+ r"""Base class for inference engine of chat models.
+
+ Must implements async methods: chat(), stream_chat() and get_scores().
+ """
name: "EngineName"
model: Union["PreTrainedModel", "AsyncLLMEngine"]
@@ -53,6 +57,7 @@ finetuning_args: "FinetuningArguments",
generating_args: "GeneratingArguments",
) -> None:
+ r"""Initialize an inference engine."""
...
@abstractmethod
@@ -66,6 +71,7 @@ audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> list["Response"]:
+ r"""Get a list of responses of the chat model."""
...
@abstractmethod
@@ -79,6 +85,7 @@ audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> AsyncGenerator[str, None]:
+ r"""Get the response token-by-token of the chat model."""
...
@abstractmethod
@@ -87,4 +94,5 @@ batch_input: list[str],
**input_kwargs,
) -> list[float]:
- ...+ r"""Get a list of scores of the reward model."""
+ ...
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/chat/base_engine.py |
Write clean docstrings for readability | # Copyright 2025 Moonshot AI and the LlamaFactory team.
#
# This code is based on the MoonshotAI's Moonlight library.
# https://github.com/MoonshotAI/Moonlight/blob/master/examples/toy_train.py
# and the Keller Jordan's Muon library.
# https://github.com/KellerJordan/Muon/blob/master/muon.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# MIT License
#
# Copyright (c) 2025 Moonshot AI
# Copyright (c) 2024 Keller Jordan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
def zeropower_via_newtonschulz5(G: "torch.Tensor", steps: int) -> "torch.Tensor":
assert len(G.shape) == 2
a, b, c = (3.4445, -4.7750, 2.0315)
X = G.bfloat16()
if G.size(0) > G.size(1):
X = X.T
# Ensure spectral norm is at most 1
X = X / (X.norm() + 1e-7)
# Perform the NS iterations
for _ in range(steps):
A = X @ X.T
B = b * A + c * A @ A # adapted from suggestion by @jxbz, @leloykun, and @YouJiacheng
X = a * X + B @ X
if G.size(0) > G.size(1):
X = X.T
return X
class Muon(torch.optim.Optimizer):
def __init__(
self,
lr=1e-3,
wd=0.1,
muon_params=None,
momentum=0.95,
nesterov=True,
ns_steps=5,
adamw_params=None,
adamw_betas=(0.9, 0.95),
adamw_eps=1e-8,
):
defaults = dict(
lr=lr,
wd=wd,
momentum=momentum,
nesterov=nesterov,
ns_steps=ns_steps,
adamw_betas=adamw_betas,
adamw_eps=adamw_eps,
)
params = list(muon_params)
adamw_params = list(adamw_params) if adamw_params is not None else []
params.extend(adamw_params)
super().__init__(params, defaults)
# Sort parameters into those for which we will use Muon, and those for which we will not
for p in muon_params:
# Use Muon for every parameter in muon_params which is >= 2D and doesn't look like an embedding or head layer
assert p.ndim == 2, p.ndim
self.state[p]["use_muon"] = True
for p in adamw_params:
# Do not use Muon for parameters in adamw_params
self.state[p]["use_muon"] = False
def adjust_lr_for_muon(self, lr: float, param_shape: list[int]) -> float:
A, B = param_shape[:2]
# We adjust the learning rate and weight decay based on the size of the parameter matrix
# as describted in the paper
adjusted_ratio = 0.2 * math.sqrt(max(A, B))
adjusted_lr = lr * adjusted_ratio
return adjusted_lr
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
# Muon loop
params = [p for p in group["params"] if self.state[p]["use_muon"]]
lr = group["lr"]
wd = group["wd"]
momentum = group["momentum"]
# generate weight updates in distributed fashion
for p in params:
# sanity check
g = p.grad
if g is None:
continue
if g.ndim > 2:
g = g.view(g.size(0), -1)
assert g is not None
# calc update
state = self.state[p]
if "momentum_buffer" not in state:
state["momentum_buffer"] = torch.zeros_like(g)
buf = state["momentum_buffer"]
buf.mul_(momentum).add_(g)
if group["nesterov"]:
g = g.add(buf, alpha=momentum)
else:
g = buf
u = zeropower_via_newtonschulz5(g, steps=group["ns_steps"])
# scale update
adjusted_lr = self.adjust_lr_for_muon(lr, p.shape)
# apply weight decay
p.data.mul_(1 - lr * wd)
# apply update
p.data.add_(u, alpha=-adjusted_lr)
# Adam backup
params = [p for p in group["params"] if not self.state[p]["use_muon"]]
lr = group["lr"]
beta1, beta2 = group["adamw_betas"]
eps = group["adamw_eps"]
weight_decay = group["wd"]
for p in params:
g = p.grad
if g is None:
continue
state = self.state[p]
if "step" not in state:
state["step"] = 0
state["moment1"] = torch.zeros_like(g)
state["moment2"] = torch.zeros_like(g)
state["step"] += 1
step = state["step"]
buf1 = state["moment1"]
buf2 = state["moment2"]
buf1.lerp_(g, 1 - beta1)
buf2.lerp_(g.square(), 1 - beta2)
g = buf1 / (eps + buf2.sqrt())
bias_correction1 = 1 - beta1**step
bias_correction2 = 1 - beta2**step
scale = bias_correction1 / bias_correction2**0.5
p.data.mul_(1 - lr * weight_decay)
p.data.add_(g, alpha=-lr / scale)
return loss | --- +++ @@ -46,6 +46,15 @@
def zeropower_via_newtonschulz5(G: "torch.Tensor", steps: int) -> "torch.Tensor":
+ """Newton-Schulz iteration to compute the zeroth power / orthogonalization of G.
+
+ We opt to use a quintic iteration whose coefficients are selected to maximize the slope at zero.
+ For the purpose of minimizing steps, it turns out to be empirically effective to keep increasing
+ the slope at zero even beyond the point where the iteration no longer converges all the way to
+ one everywhere on the interval. This iteration therefore does not produce UV^T but rather something
+ like US'V^T where S' is diagonal with S_{ii}' ~ Uniform(0.5, 1.5), which turns out not to hurt model
+ performance at all relative to UV^T, where USV^T = G is the SVD.
+ """
assert len(G.shape) == 2
a, b, c = (3.4445, -4.7750, 2.0315)
X = G.bfloat16()
@@ -65,6 +74,30 @@
class Muon(torch.optim.Optimizer):
+ """Muon - MomentUm Orthogonalized by Newton-schulz.
+
+ Muon internally runs standard SGD-momentum, and then performs an orthogonalization post-
+ processing step, in which each 2D parameter's update is replaced with the nearest orthogonal
+ matrix. To efficiently orthogonalize each update, we use a Newton-Schulz iteration, which has
+ the advantage that it can be stably run in bfloat16 on the GPU.
+
+ Some warnings:
+ - We believe this optimizer is unlikely to work well for training with small batch size.
+ - We believe it may not work well for finetuning pretrained models, but we haven't tested this.
+
+ Arguments:
+ muon_params: The parameters to be optimized by Muon.
+ lr: The learning rate. The updates will have spectral norm of `lr`. (0.02 is a good default)
+ momentum: The momentum used by the internal SGD. (0.95 is a good default)
+ nesterov: Whether to use Nesterov-style momentum in the internal SGD. (recommended)
+ ns_steps: The number of Newton-Schulz iterations to run. (6 is probably always enough)
+ adamw_params: The parameters to be optimized by AdamW. Any parameters in `muon_params` which are
+ {0, 1}-D or are detected as being the embed or lm_head will be optimized by AdamW as well.
+ adamw_lr: The learning rate for the internal AdamW.
+ adamw_betas: The betas for the internal AdamW.
+ adamw_eps: The epsilon for the internal AdamW.
+ adamw_wd: The weight decay for the internal AdamW.
+ """
def __init__(
self,
@@ -110,6 +143,12 @@ return adjusted_lr
def step(self, closure=None):
+ """Perform a single optimization step.
+
+ Args:
+ closure (Callable, optional): A closure that reevaluates the model
+ and returns the loss.
+ """
loss = None
if closure is not None:
with torch.enable_grad():
@@ -184,4 +223,4 @@ p.data.mul_(1 - lr * weight_decay)
p.data.add_(g, alpha=-lr / scale)
- return loss+ return loss
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/third_party/muon/muon.py |
Add docstrings including usage examples | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's Transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/llava/processing_llava.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import math
import os
import re
from copy import deepcopy
from dataclasses import dataclass
from io import BytesIO
from typing import TYPE_CHECKING, BinaryIO, Literal, NotRequired, Optional, TypedDict, Union
import numpy as np
import torch
import torchaudio
from transformers.image_utils import get_image_size, is_valid_image, to_numpy_array
from transformers.models.mllama.processing_mllama import (
convert_sparse_cross_attention_mask_to_dense,
get_cross_attention_token_mask,
)
from typing_extensions import override
from ..extras.constants import AUDIO_PLACEHOLDER, IGNORE_INDEX, IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER
from ..extras.packages import is_pillow_available, is_pyav_available, is_transformers_version_greater_than
if is_pillow_available():
from PIL import Image
from PIL.Image import Image as ImageObject
if is_pyav_available():
import av
if is_transformers_version_greater_than("4.52.0"):
from transformers.image_utils import make_flat_list_of_images
from transformers.video_utils import make_batched_videos
else:
from transformers.image_utils import make_batched_videos, make_flat_list_of_images
if TYPE_CHECKING:
from av.stream import Stream
from numpy.typing import NDArray
from transformers import PreTrainedTokenizer, ProcessorMixin
from transformers.feature_extraction_sequence_utils import SequenceFeatureExtractor
from transformers.image_processing_utils import BaseImageProcessor
from transformers.video_processing_utils import BaseVideoProcessor
class EncodedImage(TypedDict):
path: str | None
bytes: bytes | None
ImageInput = Union[str, bytes, EncodedImage, BinaryIO, ImageObject]
VideoInput = Union[str, BinaryIO, list[list[ImageInput]]]
AudioInput = Union[str, BinaryIO, NDArray]
class RegularizedImageOutput(TypedDict):
images: list[ImageObject]
class RegularizedVideoOutput(TypedDict):
videos: list[list[ImageObject]]
durations: list[float]
fps_per_video: NotRequired[list[float]]
class RegularizedAudioOutput(TypedDict):
audios: list[NDArray]
sampling_rates: list[float]
class MMProcessor(ProcessorMixin):
patch_size: int
image_seq_length: int
num_additional_image_tokens: int
vision_feature_select_strategy: Literal["default", "full"]
def _get_number_of_features(self, orig_height: int, orig_width: int, height: int, width: int) -> int:
pass
def _get_paligemma_token_type_ids(imglens: list[int], seqlens: list[int], processor: "MMProcessor") -> list[list[int]]:
batch_token_type_ids = []
for imglen, seqlen in zip(imglens, seqlens):
image_seqlen = imglen * processor.image_seq_length
batch_token_type_ids.append([0] * image_seqlen + [1] * (seqlen - image_seqlen))
return batch_token_type_ids
def _get_gemma3_token_type_ids(batch_ids: list[list[int]], processor: "MMProcessor"):
image_token_id: int = getattr(processor, "image_token_id")
batch_token_type_ids = []
for token_ids in batch_ids:
token_ids = np.array(token_ids)
token_type_ids = np.zeros_like(token_ids)
token_type_ids[token_ids == image_token_id] = 1
batch_token_type_ids.append(token_type_ids.tolist())
return batch_token_type_ids
def _make_batched_images(images: list["ImageObject"], imglens: list[int]) -> list[list["ImageObject"]]:
batch_images = []
for imglen in imglens:
batch_images.append(images[:imglen])
images = images[imglen:]
return batch_images
def _check_video_is_nested_images(video: "VideoInput") -> bool:
return isinstance(video, list) and all(isinstance(frame, (str, BinaryIO, dict, ImageObject)) for frame in video)
@dataclass
class MMPluginMixin:
image_token: str | None
video_token: str | None
audio_token: str | None
expand_mm_tokens: bool = True
def _validate_input(
self,
processor: Optional["MMProcessor"],
images: list["ImageInput"],
videos: list["VideoInput"],
audios: list["AudioInput"],
) -> None:
image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
video_processor: BaseImageProcessor = getattr(
processor, "video_processor", getattr(processor, "image_processor", None)
)
feature_extractor: SequenceFeatureExtractor = getattr(processor, "feature_extractor", None) or getattr(
processor, "audio_processor", None
)
if len(images) != 0 and self.image_token is None:
raise ValueError(
"This model does not support image input. Please check whether the correct `template` is used."
)
if len(videos) != 0 and self.video_token is None:
raise ValueError(
"This model does not support video input. Please check whether the correct `template` is used."
)
if len(audios) != 0 and self.audio_token is None:
raise ValueError(
"This model does not support audio input. Please check whether the correct `template` is used."
)
if self.image_token is not None and processor is None:
raise ValueError("Processor was not found, please check and update your model file.")
if self.image_token is not None and image_processor is None:
raise ValueError("Image processor was not found, please check and update your model file.")
if self.video_token is not None and video_processor is None:
raise ValueError("Video processor was not found, please check and update your model file.")
if self.audio_token is not None and feature_extractor is None:
raise ValueError("Audio feature extractor was not found, please check and update your model file.")
def _validate_messages(
self,
messages: list[dict[str, str]],
images: list["ImageInput"],
videos: list["VideoInput"],
audios: list["AudioInput"],
):
num_image_tokens, num_video_tokens, num_audio_tokens = 0, 0, 0
for message in messages:
num_image_tokens += message["content"].count(IMAGE_PLACEHOLDER)
num_video_tokens += message["content"].count(VIDEO_PLACEHOLDER)
num_audio_tokens += message["content"].count(AUDIO_PLACEHOLDER)
if len(images) != num_image_tokens:
raise ValueError(
f"The number of images does not match the number of {IMAGE_PLACEHOLDER} tokens in {messages}."
)
if len(videos) != num_video_tokens:
raise ValueError(
f"The number of videos does not match the number of {VIDEO_PLACEHOLDER} tokens in {messages}."
)
if len(audios) != num_audio_tokens:
raise ValueError(
f"The number of audios does not match the number of {AUDIO_PLACEHOLDER} tokens in {messages}."
)
def _preprocess_image(
self, image: "ImageObject", image_max_pixels: int, image_min_pixels: int, **kwargs
) -> "ImageObject":
if (image.width * image.height) > image_max_pixels:
resize_factor = math.sqrt(image_max_pixels / (image.width * image.height))
width, height = int(image.width * resize_factor), int(image.height * resize_factor)
image = image.resize((width, height))
if (image.width * image.height) < image_min_pixels:
resize_factor = math.sqrt(image_min_pixels / (image.width * image.height))
width, height = int(image.width * resize_factor), int(image.height * resize_factor)
image = image.resize((width, height))
if image.mode != "RGB":
image = image.convert("RGB")
return image
    def _get_video_sample_indices(
        self, video_stream: "Stream", video_fps: float, video_maxlen: int, **kwargs
    ) -> list[int]:
        r"""Compute the indices of the frames to sample from a video stream.

        Samples frames at roughly ``video_fps`` frames per second, capped at both
        the total frame count and ``video_maxlen``, evenly spaced over the stream.
        A stream reporting zero total frames is treated as an infinite video and
        gets exactly ``video_maxlen`` indices.
        """
        total_frames = video_stream.frames
        if total_frames == 0:  # infinite video
            return np.linspace(0, video_maxlen - 1, video_maxlen).astype(np.int32)
        sample_frames = max(1, math.floor(float(video_stream.duration * video_stream.time_base) * video_fps))
        sample_frames = min(total_frames, video_maxlen, sample_frames)
        return np.linspace(0, total_frames - 1, sample_frames).astype(np.int32)
    def _regularize_images(self, images: list["ImageInput"], **kwargs) -> "RegularizedImageOutput":
        r"""Regularize images to avoid errors: load, resize and convert each image.

        Accepts file paths, binary streams, raw bytes, dicts with ``bytes``/``path``
        keys, or PIL images. Extra ``kwargs`` are forwarded to ``_preprocess_image``.

        Raises:
            ValueError: if an entry cannot be interpreted as a PIL image.
        """
        results = []
        for image in images:
            if isinstance(image, (str, BinaryIO)):
                image = Image.open(image)
            elif isinstance(image, bytes):
                image = Image.open(BytesIO(image))
            elif isinstance(image, dict):
                if image["bytes"] is not None:
                    image = Image.open(BytesIO(image["bytes"]))
                else:
                    image = Image.open(image["path"])
            if not isinstance(image, ImageObject):
                raise ValueError(f"Expect input is a list of images, but got {type(image)}.")
            results.append(self._preprocess_image(image, **kwargs))
        return {"images": results}
    def _regularize_videos(self, videos: list["VideoInput"], **kwargs) -> "RegularizedVideoOutput":
        r"""Regularize videos to avoid errors: decode, sample and normalize frames.

        A video may be a nested list of images (pre-extracted frames) or any input
        readable by ``av``; in the latter case frames are sampled according to
        ``_get_video_sample_indices``. Each frame is then regularized via
        ``_regularize_images``. Also returns the per-video duration in seconds,
        estimated as ``len(frames) / video_fps`` when the stream reports no duration.
        """
        results = []
        durations = []
        for video in videos:
            frames: list[ImageObject] = []
            if _check_video_is_nested_images(video):
                for frame in video:
                    if not is_valid_image(frame) and not isinstance(frame, dict) and not os.path.exists(frame):
                        raise ValueError("Invalid image found in video frames.")
                frames = video
                durations.append(len(frames) / kwargs.get("video_fps", 2.0))
            else:
                container = av.open(video, "r")
                video_stream = next(stream for stream in container.streams if stream.type == "video")
                sample_indices = self._get_video_sample_indices(video_stream, **kwargs)
                container.seek(0)
                for frame_idx, frame in enumerate(container.decode(video_stream)):
                    if frame_idx in sample_indices:
                        frames.append(frame.to_image())
                if video_stream.duration is None:
                    durations.append(len(frames) / kwargs.get("video_fps", 2.0))
                else:
                    durations.append(float(video_stream.duration * video_stream.time_base))
            frames = self._regularize_images(frames, **kwargs)["images"]
            results.append(frames)
        return {"videos": results, "durations": durations}
    def _regularize_audios(
        self, audios: list["AudioInput"], sampling_rate: float, **kwargs
    ) -> "RegularizedAudioOutput":
        r"""Regularize audios to avoid errors: load, downmix and resample.

        Inputs that are not already numpy arrays are loaded with torchaudio,
        averaged to mono if multi-channel, resampled to ``sampling_rate`` if
        needed, and converted to 1-D numpy arrays.
        """
        results, sampling_rates = [], []
        for audio in audios:
            if not isinstance(audio, np.ndarray):
                audio, sr = torchaudio.load(audio)
                if audio.shape[0] > 1:
                    audio = audio.mean(dim=0, keepdim=True)
                if sr != sampling_rate:
                    audio = torchaudio.functional.resample(audio, sr, sampling_rate)
                audio = audio.squeeze(0).numpy()
            results.append(audio)
            sampling_rates.append(sampling_rate)
        return {"audios": results, "sampling_rates": sampling_rates}
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
        imglens: list[int] | None = None,
    ) -> dict[str, "torch.Tensor"]:
        r"""Build model-specific multimodal features from raw media inputs.

        Regularizes the media and runs them through the processor's image
        processor, video processor and audio feature extractor. The returned
        keys depend on the underlying processors — TODO confirm per model
        (e.g. ``pixel_values`` for images, ``feature_attention_mask`` for audios).

        Args:
            images: raw image inputs.
            videos: raw video inputs.
            audios: raw audio inputs.
            processor: the multimodal processor holding the sub-processors.
            imglens: optional number of images per sample; when given, images
                are re-grouped into per-sample batches before processing.
        """
        mm_inputs = {}
        if len(images) != 0:
            image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            if imglens is not None:  # if imglens are provided, make batched images
                images = _make_batched_images(images, imglens)
            image_processor_kwargs = {}
            if getattr(processor, "image_do_pan_and_scan", False):  # gemma3 image processor
                image_processor_kwargs.update(
                    {
                        "do_pan_and_scan": True,
                        "pan_and_scan_min_crop_size": 256,
                        "pan_and_scan_max_num_crops": 4,
                        "pan_and_scan_min_ratio_to_activate": 1.2,
                    }
                )
            mm_inputs.update(image_processor(images, return_tensors="pt", **image_processor_kwargs))
        if len(videos) != 0:
            # fall back to the image processor when no dedicated video processor exists
            video_processor: BaseImageProcessor = getattr(
                processor, "video_processor", getattr(processor, "image_processor", None)
            )
            videos = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )["videos"]
            if "videos" in inspect.signature(video_processor.preprocess).parameters:  # for qwen2_vl and video_llava
                mm_inputs.update(video_processor(images=None, videos=videos, return_tensors="pt"))
            else:  # for llava_next_video
                mm_inputs.update(video_processor(videos, return_tensors="pt"))
        if len(audios) != 0:
            feature_extractor: SequenceFeatureExtractor = getattr(processor, "feature_extractor", None) or getattr(
                processor, "audio_processor", None
            )
            audios = self._regularize_audios(
                audios,
                sampling_rate=getattr(processor, "audio_sampling_rate", 16000),
            )["audios"]
            mm_inputs.update(
                feature_extractor(
                    audios,
                    sampling_rate=getattr(processor, "audio_sampling_rate", 16000),
                    return_attention_mask=True,
                    padding="max_length",
                    return_tensors="pt",
                )
            )
            mm_inputs["feature_attention_mask"] = mm_inputs.pop("attention_mask", None)  # prevent conflicts
        return mm_inputs
@dataclass
class BasePlugin(MMPluginMixin):
    r"""Base multimodal plugin: passes messages and token ids through unchanged.

    Subclasses override these hooks to expand media placeholders into
    model-specific token sequences and to build model-specific feature tensors.
    """

    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Pre-process input messages before tokenization for VLMs.

        The base implementation only validates the inputs and returns the
        messages unchanged.
        """
        self._validate_input(processor, images, videos, audios)
        return messages

    def process_token_ids(
        self,
        input_ids: list[int],
        labels: list[int] | None,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        tokenizer: "PreTrainedTokenizer",
        processor: Optional["MMProcessor"],
    ) -> tuple[list[int], list[int] | None]:
        r"""Pre-process token ids after tokenization for VLMs.

        The base implementation only validates the inputs and returns the
        ``(input_ids, labels)`` pair unchanged.
        """
        self._validate_input(processor, images, videos, audios)
        return input_ids, labels

    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        r"""Build batched multimodal inputs for VLMs.

        Args:
            images: flat list of image inputs over the batch.
            videos: flat list of video inputs over the batch.
            audios: flat list of audio inputs over the batch.
            imglens: number of images per sample.
            vidlens: number of videos per sample.
            audlens: number of audios per sample.
            batch_ids: token ids of each sample (used by some subclasses).
            processor: the multimodal processor.
        """
        self._validate_input(processor, images, videos, audios)
        return self._get_mm_inputs(images, videos, audios, processor)
@dataclass
class ErnieVLPlugin(BasePlugin):
    r"""Multimodal plugin for ERNIE-VL models."""

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Expand media placeholders into ERNIE-VL image/video token sequences.

        Each image placeholder becomes ``Picture i:<|IMAGE_START|>...<|IMAGE_END|>``
        and each video placeholder ``Video i:<|VIDEO_START|>...<|VIDEO_END|>``. When
        ``expand_mm_tokens`` is enabled, the number of repeated media tokens is the
        grid volume (``grid_thw.prod()``) divided by ``merge_size ** 2``; otherwise
        a single token is used.
        """
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        merge_length: int = getattr(image_processor, "merge_size") ** 2
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_thw = mm_inputs.get("image_grid_thw", [])
            video_grid_thw = mm_inputs.get("video_grid_thw", [])
        else:
            image_grid_thw = [None] * len(images)
            video_grid_thw = [None] * len(videos)
        image_idx, video_idx = 0, 0
        for message in messages:
            content = message["content"]
            # fall back to the literal placeholder strings when tokens are unset
            image_token = self.image_token or "<|IMAGE_PLACEHOLDER|>"
            video_token = self.video_token or "<|VIDEO_PLACEHOLDER|>"
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = image_grid_thw[image_idx].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"Picture {image_idx + 1}:<|IMAGE_START|>{image_token * image_seqlen}<|IMAGE_END|>",
                    1,
                )
                image_idx += 1
            while VIDEO_PLACEHOLDER in content:
                video_seqlen = video_grid_thw[video_idx].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    VIDEO_PLACEHOLDER,
                    f"Video {video_idx + 1}:<|VIDEO_START|>{video_token * video_seqlen}<|VIDEO_END|>",
                    1,
                )
                video_idx += 1
            message["content"] = content
        return messages
@dataclass
class Gemma3Plugin(BasePlugin):
    r"""Multimodal plugin for Gemma 3 models."""

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Replace image placeholders with the Gemma 3 image sequence.

        Each placeholder becomes the processor's ``full_image_sequence`` (or just
        the ``boi_token`` when ``expand_mm_tokens`` is disabled). When pan-and-scan
        is enabled, extra crop placeholders are inserted according to the
        ``num_crops`` returned by the image processor.
        """
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens = 0
        messages = deepcopy(messages)
        boi_token: str = getattr(processor, "boi_token")
        full_image_sequence: str = getattr(processor, "full_image_sequence")
        image_str = full_image_sequence if self.expand_mm_tokens else boi_token
        do_pan_and_scan: bool = getattr(processor, "image_do_pan_and_scan", False)
        if do_pan_and_scan:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                if do_pan_and_scan:
                    image_placeholder_str = (
                        "Here is the original image {{image}} and here are some crops to help you see better "
                        + " ".join(["{{image}}"] * mm_inputs["num_crops"][0][num_image_tokens])
                    )
                else:
                    image_placeholder_str = "{{image}}"
                content = content.replace(IMAGE_PLACEHOLDER, image_placeholder_str, 1)
                num_image_tokens += 1
            message["content"] = content.replace("{{image}}", image_str)
        return messages

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        r"""Build Gemma 3 inputs: drop the helper-only ``num_crops`` key and
        attach ``token_type_ids`` computed from the batch token ids.
        """
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        mm_inputs.pop("num_crops", None)
        mm_inputs["token_type_ids"] = _get_gemma3_token_type_ids(batch_ids, processor)
        return mm_inputs
class Gemma3nPlugin(Gemma3Plugin):
    r"""Multimodal plugin for Gemma 3n models (image and audio)."""

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Replace image/audio placeholders with the Gemma 3n media sequences.

        Image placeholders become the processor's ``full_image_sequence`` and
        audio placeholders its ``full_audio_sequence`` (or just the ``boi_token``/
        ``boa_token`` when ``expand_mm_tokens`` is disabled).
        """
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        messages = deepcopy(messages)
        boi_token: str = getattr(processor, "boi_token")
        boa_token: str = getattr(processor, "boa_token")
        full_image_sequence: str = getattr(processor, "full_image_sequence")
        full_audio_sequence: str = getattr(processor, "full_audio_sequence")
        image_str = full_image_sequence if self.expand_mm_tokens else boi_token
        audio_str = full_audio_sequence if self.expand_mm_tokens else boa_token
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(IMAGE_PLACEHOLDER, image_str, 1)
            while AUDIO_PLACEHOLDER in content:
                content = content.replace(AUDIO_PLACEHOLDER, audio_str, 1)
            message["content"] = content
        return messages
@dataclass
class InternVLPlugin(BasePlugin):
    r"""Multimodal plugin for InternVL models (image and video)."""

    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "ProcessorMixin",
        **kwargs,
    ) -> dict[str, "torch.Tensor"]:
        r"""Build InternVL features: per-media patch tensors plus patch bookkeeping.

        Images (optionally cropped to patches) and video frames are processed
        separately and their pixel values are concatenated into a single
        ``pixel_values`` tensor, along with ``image_num_patches``,
        ``video_num_patches`` and ``video_patch_indices`` used to locate each
        media's patches. Image/video interleaving is not supported: all image
        patches come before all video patches.
        """
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        image_processor_kwargs = {}
        if getattr(processor, "crop_to_patches", False):
            image_processor_kwargs.update(
                {
                    "crop_to_patches": True,
                    "max_patches": 12,
                    "min_patches": 1,
                }
            )
        mm_inputs = {}
        image_video_patches = []
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 1024 * 1024),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
        if len(videos) != 0:
            videos = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )["videos"]
        if len(images) != 0:
            images = make_flat_list_of_images(images)
            image_inputs = image_processor(images=images, return_tensors="pt", **image_processor_kwargs)
            image_num_patches = image_inputs.pop("num_patches")
            image_pixel_values = image_inputs.pop("pixel_values")
            image_num_patches_indices = np.cumsum(image_num_patches)
        if len(videos) != 0:
            videos = make_batched_videos(videos)
            num_frames_per_video = [len(video) for video in videos]
            patch_indices = np.cumsum(num_frames_per_video)
            # video frames are never cropped to patches
            image_processor_kwargs["crop_to_patches"] = False
            video_inputs = image_processor(images=videos, return_tensors="pt", **image_processor_kwargs)
            video_num_patches = video_inputs.pop("num_patches")
            video_pixel_values = video_inputs.pop("pixel_values")
            video_num_patches_indices = np.cumsum(video_num_patches)
        # NOT SUPPORT IMAGE VIDEO INTERLEAVED
        if len(images) != 0 and image_pixel_values is not None:
            for i in range(len(images)):
                start_index = image_num_patches_indices[i - 1] if i > 0 else 0
                end_index = image_num_patches_indices[i]
                image_video_patches.append(image_pixel_values[start_index:end_index])
        if len(videos) != 0 and video_pixel_values is not None:
            patch_indices_with_prefix = [0] + list(patch_indices)
            for i in range(len(videos)):
                current_patch_index = patch_indices_with_prefix[i]
                end_patch_index = patch_indices_with_prefix[i + 1]
                start_index = video_num_patches_indices[current_patch_index - 1] if i > 0 else 0
                end_index = video_num_patches_indices[end_patch_index - 1]
                image_video_patches.append(video_pixel_values[start_index:end_index])
        if len(images) != 0 or len(videos) != 0:
            mm_inputs["pixel_values"] = torch.cat(image_video_patches, dim=0)
        if len(images) != 0:
            mm_inputs.update({"image_num_patches": image_num_patches})
        if len(videos) != 0:
            mm_inputs.update({"video_patch_indices": patch_indices})
            mm_inputs.update({"video_num_patches": video_num_patches})
        return mm_inputs

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["ProcessorMixin"],
    ) -> list[dict[str, str]]:
        r"""Expand media placeholders into InternVL ``<IMG_CONTEXT>`` sequences.

        Each image placeholder becomes ``<img>...</img>`` with
        ``image_seq_length * num_patches`` context tokens; each video placeholder
        becomes one ``Frame i: <img>...</img>`` line per sampled frame.
        """
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens = 0, 0
        image_seqlen = getattr(processor, "image_seq_length") if self.expand_mm_tokens else 1
        messages = deepcopy(messages)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        image_pixel_patch_list = mm_inputs.get("image_num_patches")  # patches per image
        video_num_patches = mm_inputs.get("video_num_patches")  # patches for every frame of every video
        video_patch_indices = mm_inputs.get("video_patch_indices")  # cumulative frame counts per video
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"<img>{'<IMG_CONTEXT>' * image_seqlen * image_pixel_patch_list[num_image_tokens]}</img>",
                    1,
                )
                num_image_tokens += 1
            while VIDEO_PLACEHOLDER in content:
                current_patch_index = video_patch_indices[num_video_tokens - 1] if num_video_tokens > 0 else 0
                end_patch_index = video_patch_indices[num_video_tokens]
                num_patches = list(video_num_patches[current_patch_index:end_patch_index])
                video_replaced_prompt = "\n".join(
                    f"Frame{i + 1}: <img>{'<IMG_CONTEXT>' * image_seqlen * num_patches[i]}</img>"
                    for i in range(len(num_patches))
                )
                content = content.replace(VIDEO_PLACEHOLDER, video_replaced_prompt, 1)
                num_video_tokens += 1
            message["content"] = content
        return messages

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["ProcessorMixin"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        r"""Build InternVL model inputs, dropping the patch bookkeeping keys
        that are only needed by ``process_messages``.
        """
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        mm_inputs.pop("image_num_patches", None)
        mm_inputs.pop("video_patch_indices", None)
        mm_inputs.pop("video_num_patches", None)
        return mm_inputs
class KimiVLPlugin(BasePlugin):
    r"""Multimodal plugin for Kimi-VL models (image only)."""

    @override
    def process_messages(self, messages, images, videos, audios, processor):
        r"""Expand image placeholders into Kimi-VL media token sequences.

        Each placeholder becomes ``<|media_start|>image<|media_content|>...<|media_end|>``
        where the image token is repeated ``grid_hw.prod() // prod(merge_kernel_size)``
        times when ``expand_mm_tokens`` is enabled, else once.
        """
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_hws = mm_inputs.get("image_grid_hws", [])
        else:
            image_grid_hws = [None] * len(images)
        num_image_tokens = 0
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        merge_length = math.prod(image_processor.merge_kernel_size)
        messages = deepcopy(messages)
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = image_grid_hws[num_image_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"<|media_start|>image<|media_content|>{self.image_token * image_seqlen}<|media_end|>",
                    1,
                )
                num_image_tokens += 1
            message["content"] = content
        return messages
@dataclass
class Llama4Plugin(BasePlugin):
    r"""Multimodal plugin for Llama 4 models (image only)."""

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Expand image placeholders into Llama 4 per-image token strings.

        When ``expand_mm_tokens`` is enabled, each placeholder is replaced by the
        string built by the processor's ``_prompt_split_image`` for that image's
        aspect ratio and patch count; otherwise by the plain image token.
        """
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            if "pixel_values" in mm_inputs:
                image_height, image_width = mm_inputs["pixel_values"][0].shape[-2:]
                num_patches_per_chunk = int(
                    (image_height // processor.patch_size)
                    * (image_width // processor.patch_size)
                    // processor.downsample_ratio
                )
                aspect_ratios = mm_inputs.pop("aspect_ratios")
        num_image_tokens = 0
        messages = deepcopy(messages)
        for message in messages:
            content = message["content"]
            if self.expand_mm_tokens:
                placeholder_count = content.count(IMAGE_PLACEHOLDER)
                prompt_splits = content.split(IMAGE_PLACEHOLDER)
                new_content = []
                for local_image_index, split_part in enumerate(prompt_splits):
                    new_content.append(split_part)
                    if local_image_index < placeholder_count:
                        tokens_for_this_image = processor._prompt_split_image(
                            aspect_ratios[num_image_tokens], num_patches_per_chunk
                        )
                        num_image_tokens += 1
                        new_content.append(tokens_for_this_image)
                content = "".join(new_content)
            else:
                content = content.replace(IMAGE_PLACEHOLDER, self.image_token)
            message["content"] = content
        return messages

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        r"""Build Llama 4 model inputs, dropping the helper-only ``aspect_ratios`` key."""
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        mm_inputs.pop("aspect_ratios", None)
        return mm_inputs
@dataclass
class LlavaPlugin(BasePlugin):
    r"""Multimodal plugin for LLaVA models (image only)."""

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Expand image placeholders into repeated LLaVA image tokens.

        The per-image sequence length is ``(H // patch_size) * (W // patch_size)
        + num_additional_image_tokens``, minus one under the "default" vision
        feature select strategy; a single token is used when ``expand_mm_tokens``
        is disabled.
        """
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        messages = deepcopy(messages)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            if "pixel_values" in mm_inputs:
                height, width = get_image_size(to_numpy_array(mm_inputs["pixel_values"][0]))
                image_seqlen = (height // processor.patch_size) * (
                    width // processor.patch_size
                ) + processor.num_additional_image_tokens
                if processor.vision_feature_select_strategy == "default":
                    image_seqlen -= 1
        else:
            image_seqlen = 1
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}" * image_seqlen, 1)
            message["content"] = content.replace("{{image}}", self.image_token)
        return messages
@dataclass
class LlavaNextPlugin(BasePlugin):
    r"""Multimodal plugin for LLaVA-NeXT models (image only)."""

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Expand image placeholders into repeated LLaVA-NeXT image tokens.

        The per-image sequence length is computed by the processor's
        ``_get_number_of_features`` from the original and processed image sizes,
        minus one under the "default" vision feature select strategy; a single
        token is used when ``expand_mm_tokens`` is disabled.
        """
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens = 0
        messages = deepcopy(messages)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            if "pixel_values" in mm_inputs:
                image_sizes = iter(mm_inputs["image_sizes"].tolist())
                height, width = get_image_size(to_numpy_array(mm_inputs["pixel_values"][0][0]))
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                if self.expand_mm_tokens:
                    orig_height, orig_width = next(image_sizes)
                    image_seqlen = processor._get_number_of_features(orig_height, orig_width, height, width)
                    if processor.vision_feature_select_strategy == "default":
                        image_seqlen -= 1
                else:
                    image_seqlen = 1
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}" * image_seqlen, 1)
                num_image_tokens += 1
            message["content"] = content.replace("{{image}}", self.image_token)
        return messages
@dataclass
class LlavaNextVideoPlugin(BasePlugin):
    r"""Multimodal plugin for LLaVA-NeXT-Video models (image and video)."""

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Expand image and video placeholders into repeated media tokens.

        Images follow the LLaVA-NeXT feature-count logic. For videos, the
        per-video sequence length is ``(H // patch_size) * (W // patch_size) // 4``
        per frame times the frame count (the division by 4 matches the model's
        average-pooling layer).
        """
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        messages = deepcopy(messages)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            if "pixel_values" in mm_inputs:
                image_sizes = iter(mm_inputs["image_sizes"].tolist())
                height, width = get_image_size(to_numpy_array(mm_inputs["pixel_values"][0][0]))
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                if self.expand_mm_tokens:
                    orig_height, orig_width = next(image_sizes)
                    image_seqlen = processor._get_number_of_features(orig_height, orig_width, height, width)
                    if processor.vision_feature_select_strategy == "default":
                        image_seqlen -= 1
                else:
                    image_seqlen = 1
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}" * image_seqlen, 1)
            message["content"] = content.replace("{{image}}", self.image_token)
        # NOTE(review): when expand_mm_tokens is True but "pixel_values_videos" is
        # absent, video_seqlen stays unbound; validation guarantees videos exist
        # whenever a video placeholder is present — confirm this invariant holds.
        if self.expand_mm_tokens:
            if "pixel_values_videos" in mm_inputs:
                one_video = to_numpy_array(mm_inputs.get("pixel_values_videos")[0])
                height, width = get_image_size(one_video[0])
                num_frames = one_video.shape[0]  # frame dim is always after batch dim
                image_seqlen = (height // processor.patch_size) * (width // processor.patch_size)
                video_seqlen = image_seqlen // 4 * num_frames  # divide by 4 needed for avg pooling layer
        else:
            video_seqlen = 1
        for message in messages:
            content = message["content"]
            while VIDEO_PLACEHOLDER in content:
                content = content.replace(VIDEO_PLACEHOLDER, "{{video}}" * video_seqlen, 1)
            message["content"] = content.replace("{{video}}", self.video_token)
        return messages
@dataclass
class MiniCPMVPlugin(BasePlugin):
    r"""Multimodal plugin for MiniCPM-V / MiniCPM-o models (image, video and audio).

    Images and videos are mutually exclusive for this model family.
    """

    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
        **kwargs,
    ) -> dict[str, "torch.Tensor"]:
        r"""Build MiniCPM-V features from raw media inputs.

        Supports re-grouping images/audios into per-sample sublists via the
        ``valid_image_nums_ls`` / ``valid_audio_nums_ls`` kwargs. Audio features
        are extracted with the processor's ``audio_feature_extract``; pass
        ``ret_phs=True`` to also receive the audio placeholder strings under
        ``audio_phs``.
        """
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            if "valid_image_nums_ls" in kwargs:
                valid_image_nums_ls = kwargs["valid_image_nums_ls"]
                new_images = []
                idx = 0
                for valid_image_nums in valid_image_nums_ls:
                    new_images.append(images[idx : idx + valid_image_nums])
                    idx += valid_image_nums
                images = new_images
            image_inputs = image_processor(
                images, do_pad=True, max_slice_nums=image_processor.max_slice_nums, return_tensors="pt"
            )
            mm_inputs.update(image_inputs)
        if len(videos) != 0:
            videos = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )["videos"]
            video_inputs = image_processor(videos, do_pad=True, max_slice_nums=2, return_tensors="pt")
            mm_inputs.update(video_inputs)
        if len(audios) != 0:
            audios = self._regularize_audios(
                audios,
                sampling_rate=getattr(processor, "audio_sampling_rate", 16000),
            )["audios"]
            if "valid_audio_nums_ls" in kwargs:
                valid_audio_nums_ls = kwargs["valid_audio_nums_ls"]
                audios_ls = []
                idx = 0
                for valid_audio_nums in valid_audio_nums_ls:
                    audios_ls.append(audios[idx : idx + valid_audio_nums])
                    idx += valid_audio_nums
            else:
                audios_ls = [audios]
            audio_features, audio_feature_lens, audio_phs = processor.audio_feature_extract(
                audios_ls,
                chunk_input=True,
                sampling_rate=getattr(processor, "audio_sampling_rate", 16000),
            )
            audio_feature_lens = [
                x.clone().detach() if isinstance(x, torch.Tensor) else torch.tensor(x) for x in audio_feature_lens
            ]
            mm_inputs.update({"audio_features": audio_features, "audio_feature_lens": audio_feature_lens})
            if kwargs.get("ret_phs", False):
                mm_inputs.update({"audio_phs": audio_phs})
        return mm_inputs

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Expand media placeholders into MiniCPM-V media markers.

        First rewrites placeholders to ``(<image>./</image>)`` / ``(<audio>./</audio>)``
        markers (one per image or audio, one per sampled video frame), then — when
        ``expand_mm_tokens`` is enabled — replaces each marker with the processor's
        slice-image placeholder or the extracted audio placeholder string.

        Raises:
            ValueError: if both images and videos are given (unsupported).
        """
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens, num_audio_tokens = 0, 0, 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        mm_inputs, audio_inputs = {}, {}
        if len(images) != 0 and len(videos) != 0:
            raise ValueError("MiniCPM-V model does not support input images and videos at the same time.")
        if len(videos) != 0:
            max_slice_nums = 2
            use_image_id = False
            mm_inputs = self._get_mm_inputs([], videos, [], processor)
        else:
            max_slice_nums = image_processor.max_slice_nums
            use_image_id = image_processor.use_image_id
        for i, message in enumerate(messages):
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}", 1)
                num_image_tokens += 1
            while VIDEO_PLACEHOLDER in content:
                # one image marker per sampled frame of this video
                video_seqlen = len(mm_inputs["image_sizes"][num_video_tokens]) if self.expand_mm_tokens else 1
                content = content.replace(VIDEO_PLACEHOLDER, "{{image}}" * video_seqlen, 1)
                num_video_tokens += 1
            while AUDIO_PLACEHOLDER in content:
                content = content.replace(AUDIO_PLACEHOLDER, "{{audio}}", 1)
                num_audio_tokens += 1
            message["content"] = content.replace("{{image}}", "(<image>./</image>)").replace(
                "{{audio}}", "(<audio>./</audio>)"
            )
        if len(images):
            mm_inputs = self._get_mm_inputs(images, [], [], processor)
        if len(audios):
            audio_inputs = self._get_mm_inputs([], [], audios, processor, ret_phs=True)
        if self.expand_mm_tokens and mm_inputs:
            # replace each image marker with the processor's slice placeholder
            pattern = "(<image>./</image>)"
            image_sizes = mm_inputs["image_sizes"]
            idx = 0
            for index, message in enumerate(messages):
                text = message["content"]
                image_tags = re.findall(pattern, text)
                text_chunks = text.split(pattern)
                final_text = ""
                for i in range(len(image_tags)):
                    final_text = (
                        final_text
                        + text_chunks[i]
                        + image_processor.get_slice_image_placeholder(
                            image_sizes[0][idx], idx, max_slice_nums, use_image_id
                        )
                    )
                    idx += 1
                final_text += text_chunks[-1]
                messages[index]["content"] = final_text
        if self.expand_mm_tokens and audio_inputs:
            # replace each audio marker with the extracted audio placeholder
            pattern = "(<audio>./</audio>)"
            idx = 0
            for index, message in enumerate(messages):
                text = message["content"]
                audio_tags = re.findall(pattern, text)
                text_chunks = text.split(pattern)
                final_text = ""
                for i in range(len(audio_tags)):
                    audio_placeholder = audio_inputs["audio_phs"][0][idx]
                    final_text = final_text + text_chunks[i] + audio_placeholder
                    idx += 1
                final_text += text_chunks[-1]
                messages[index]["content"] = final_text
        return messages

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        r"""Build MiniCPM-V model inputs with image/audio/speaker bound indices.

        Bounds are (start, end) token positions computed from the tokenizer's
        special ids (``im_start_id``/``im_end_id``, ``slice_start_id``/``slice_end_id``,
        ``audio_start_id``/``audio_end_id``, ``spk_start_id``/``spk_end_id``) in each
        sample's token ids. Fills dummy tensors when no image features exist.
        """
        self._validate_input(processor, images, videos, audios)
        # image bound
        image_bounds_list = []
        valid_image_nums_ls = []
        for i, input_ids in enumerate(batch_ids):
            input_ids_ = torch.tensor(input_ids)
            start_cond = (input_ids_ == processor.tokenizer.im_start_id) | (
                input_ids_ == processor.tokenizer.slice_start_id
            )
            end_cond = (input_ids_ == processor.tokenizer.im_end_id) | (input_ids_ == processor.tokenizer.slice_end_id)
            image_start_tokens = torch.where(start_cond)[0]
            image_start_tokens += 1
            image_end_tokens = torch.where(end_cond)[0]
            valid_image_nums_ls.append(imglens[i])
            image_bounds = torch.hstack(
                [
                    image_start_tokens.unsqueeze(-1),
                    image_end_tokens.unsqueeze(-1),
                ]
            )
            image_bounds_list.append(image_bounds)
        mm_inputs = self._get_mm_inputs(images, videos, [], processor, valid_image_nums_ls=valid_image_nums_ls)
        if "tgt_sizes" not in mm_inputs:
            dummy_data = [torch.empty(0) for _ in range(len(batch_ids))]
            mm_inputs.update({"tgt_sizes": dummy_data, "pixel_values": dummy_data, "image_sizes": dummy_data})
        mm_inputs.update({"image_bound": image_bounds_list})
        if len(audios) > 0:
            # audio bound
            audio_bounds_ls = []
            spk_bounds_ls = []
            valid_audio_nums_ls = []
            for input_ids, audiolen in zip(batch_ids, audlens):
                input_ids_ = torch.tensor(input_ids)
                audio_start_idx = torch.where(input_ids_ == processor.tokenizer.audio_start_id)[0]
                audio_end_idx = torch.where(input_ids_ == processor.tokenizer.audio_end_id)[0]
                assert len(audio_start_idx) == len(audio_end_idx)
                audio_bounds = torch.hstack([(audio_start_idx + 1).unsqueeze(-1), audio_end_idx.unsqueeze(-1)])
                audio_bounds_ls.append(audio_bounds)
                valid_audio_nums_ls.append(audiolen)
                spk_start_idx = torch.where(input_ids_ == processor.tokenizer.spk_start_id)[0]
                spk_end_idx = torch.where(input_ids_ == processor.tokenizer.spk_end_id)[0]
                assert len(spk_start_idx) == len(spk_end_idx)
                spk_bounds = torch.hstack([(spk_start_idx + 1).unsqueeze(-1), spk_end_idx.unsqueeze(-1)])
                spk_bounds_ls.append(spk_bounds)
            audio_inputs = self._get_mm_inputs([], [], audios, processor, valid_audio_nums_ls=valid_audio_nums_ls)
            mm_inputs.update(audio_inputs)
            mm_inputs.update({"audio_bounds": audio_bounds_ls, "spk_bounds": spk_bounds_ls})
        return mm_inputs
@dataclass
class MllamaPlugin(BasePlugin):
    r"""Multimodal plugin for Llama 3.2 Vision (Mllama) models (image only)."""

    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Replace each image placeholder with a single image token.

        Mllama uses cross-attention rather than inline image embeddings, so no
        token expansion is needed here.
        """
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens = 0
        messages = deepcopy(messages)
        for message in messages:
            content = message["content"]
            num_image_tokens += content.count(IMAGE_PLACEHOLDER)
            message["content"] = content.replace(IMAGE_PLACEHOLDER, self.image_token)
        return messages

    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        r"""Build Mllama inputs including the dense cross-attention mask.

        The sparse per-sample cross-attention token masks (derived from the image
        token positions in ``batch_ids``) are converted to a dense tensor of shape
        ``(batch_size, length, max_num_images, max_num_tiles)``.
        """
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor, imglens)
        if mm_inputs:
            num_tiles = mm_inputs.pop("num_tiles")
            image_token_id: int = getattr(processor, "image_token_id")
            max_image_tiles: int = getattr(processor.image_processor, "max_image_tiles")
            cross_attention_token_mask = [
                get_cross_attention_token_mask(input_ids, image_token_id) for input_ids in batch_ids
            ]
            mm_inputs["cross_attention_mask"] = torch.from_numpy(
                convert_sparse_cross_attention_mask_to_dense(
                    cross_attention_token_mask,
                    num_tiles=num_tiles,
                    max_num_tiles=max_image_tiles,
                    length=max(len(input_ids) for input_ids in batch_ids),
                )
            )  # shape: (batch_size, length, max_num_images, max_num_tiles)
        return mm_inputs
@dataclass
class PaliGemmaPlugin(BasePlugin):
    r"""Multimodal plugin for PaliGemma models.

    Unlike most plugins, PaliGemma removes image placeholders from the message
    text and instead prepends the image tokens directly to the token ids in
    ``process_token_ids``; ``token_type_ids`` mark the image positions.
    """
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Strip image placeholders from messages; image tokens are injected later at the id level."""
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens = 0
        messages = deepcopy(messages)
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(IMAGE_PLACEHOLDER, "", 1)
                num_image_tokens += 1
            message["content"] = content
        return messages
    @override
    def process_token_ids(
        self,
        input_ids: list[int],
        labels: list[int] | None,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        tokenizer: "PreTrainedTokenizer",
        processor: Optional["MMProcessor"],
    ) -> tuple[list[int], list[int] | None]:
        r"""Prepend one image-token run per image to the input ids.

        The prepended label positions are masked with ``IGNORE_INDEX`` so they
        do not contribute to the loss.
        """
        self._validate_input(processor, images, videos, audios)
        num_images = len(images)
        image_seqlen = processor.image_seq_length if self.expand_mm_tokens else 0  # skip mm token
        image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
        input_ids = [image_token_id] * num_images * image_seqlen + input_ids
        if labels is not None:
            labels = [IGNORE_INDEX] * num_images * image_seqlen + labels
        return input_ids, labels
    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        r"""Build batched model inputs plus PaliGemma-specific ``token_type_ids``."""
        self._validate_input(processor, images, videos, audios)
        seqlens = [len(input_ids) for input_ids in batch_ids]
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        mm_inputs["token_type_ids"] = _get_paligemma_token_type_ids(imglens, seqlens, processor)
        return mm_inputs
@dataclass
class PixtralPlugin(BasePlugin):
    r"""Multimodal plugin for Pixtral models.

    Expands each image placeholder into a grid of image tokens with row-break
    tokens between rows and an end token at the last position.
    """
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Replace image placeholders with token grids sized from the processed image sizes."""
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        messages = deepcopy(messages)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            if "pixel_values" in mm_inputs:
                # BC for transformers < 4.49.0
                if isinstance(mm_inputs["image_sizes"], list):
                    image_sizes = iter(mm_inputs["image_sizes"][0])
                else:
                    image_sizes = iter(mm_inputs["image_sizes"].tolist())
                image_break_token: str = getattr(processor, "image_break_token")
                image_end_token: str = getattr(processor, "image_end_token")
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                if self.expand_mm_tokens:
                    # effective patch size after spatial merging (merge size defaults to 1)
                    patch_size = processor.patch_size * getattr(processor, "spatial_merge_size", 1)
                    height, width = next(image_sizes)
                    num_height_tokens = height // patch_size
                    num_width_tokens = width // patch_size
                    # one row of image tokens per patch row, terminated by a break token
                    replace_tokens = [[self.image_token] * num_width_tokens + [image_break_token]] * num_height_tokens
                    replace_tokens = [item for sublist in replace_tokens for item in sublist]  # flatten list
                    replace_tokens[-1] = image_end_token  # last break becomes the image end token
                    replace_str = "".join(replace_tokens)
                else:
                    replace_str = self.image_token
                content = content.replace(IMAGE_PLACEHOLDER, replace_str, 1)
            message["content"] = content
        return messages
    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        r"""Build batched model inputs, dropping ``image_sizes`` on old transformers versions."""
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        # ref to this commit https://github.com/huggingface/transformers/pull/35122
        # after transformers 4.49.0, the `image_sizes` is mandatory as an input parameter for Pixtral VisionEncoder forwarding.
        # it can be passed into `LlavaConditionalGeneration` as a parameter.
        if not is_transformers_version_greater_than("4.49.0"):
            mm_inputs.pop("image_sizes", None)
        return mm_inputs
@dataclass
class Qwen2AudioPlugin(BasePlugin):
    r"""Multimodal plugin for Qwen2-Audio models.

    Expands each audio placeholder into a run of audio tokens delimited by the
    processor's audio BOS/EOS tokens.
    """
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Replace audio placeholders with BOS + audio tokens + EOS."""
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        bos_token: str = getattr(processor, "audio_bos_token")
        eos_token: str = getattr(processor, "audio_eos_token")
        messages = deepcopy(messages)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs([], [], audios, processor)
            if "feature_attention_mask" in mm_inputs:
                # number of valid feature frames per audio clip
                audio_lengths = mm_inputs["feature_attention_mask"].sum(-1).tolist()
        for message in messages:
            content = message["content"]
            while AUDIO_PLACEHOLDER in content:
                if self.expand_mm_tokens:
                    audio_length = audio_lengths.pop(0)
                    # two successive stride-2 reductions; presumably mirrors the audio
                    # encoder's downsampling — TODO confirm against the model config
                    input_length = (audio_length - 1) // 2 + 1
                    audio_seqlen = (input_length - 2) // 2 + 1
                else:
                    audio_seqlen = 1
                content = content.replace(
                    AUDIO_PLACEHOLDER, f"{bos_token}{self.audio_token * audio_seqlen}{eos_token}", 1
                )
            message["content"] = content
        return messages
    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["MMProcessor"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        r"""Build batched model inputs from the raw audio clips."""
        self._validate_input(processor, images, videos, audios)
        return self._get_mm_inputs(images, videos, audios, processor)
@dataclass
class Qwen2VLPlugin(BasePlugin):
    r"""Multimodal plugin for Qwen2-VL family models.

    Expands image/video placeholders into vision-token runs delimited by the
    vision BOS/EOS tokens, sized from the processor's ``*_grid_thw`` outputs.
    """
    vision_bos_token: str = "<|vision_start|>"
    vision_eos_token: str = "<|vision_end|>"
    @override
    def _preprocess_image(self, image: "ImageObject", **kwargs) -> "ImageObject":
        r"""Pre-process a single image, enforcing Qwen2-VL size/aspect-ratio constraints."""
        image = super()._preprocess_image(image, **kwargs)
        # upscale so both sides are at least 28 px; presumably the minimum the
        # image processor accepts — TODO confirm against the processor config
        if min(image.width, image.height) < 28:
            width, height = max(image.width, 28), max(image.height, 28)
            image = image.resize((width, height))
        # clamp extreme aspect ratios (> 200) down to 180:1 in either direction
        if image.width / image.height > 200:
            width, height = image.height * 180, image.height
            image = image.resize((width, height))
        if image.height / image.width > 200:
            width, height = image.width, image.width * 180
            image = image.resize((width, height))
        return image
    @override
    def _regularize_videos(self, videos: list["VideoInput"], **kwargs) -> "RegularizedVideoOutput":
        r"""Decode and sample video frames, tracking effective fps and duration per video.

        Returns:
            Dict with ``videos`` (nested frame lists), ``fps_per_video`` and ``durations``.
        """
        results, fps_per_video, durations = [], [], []
        for video in videos:
            frames: list[ImageObject] = []
            if _check_video_is_nested_images(video):
                # video supplied as a list of frames; validate each entry
                for frame in video:
                    if not is_valid_image(frame) and not isinstance(frame, dict) and not os.path.exists(frame):
                        raise ValueError("Invalid image found in video frames.")
                frames = video
                fps_per_video.append(kwargs.get("video_fps", 2.0))
                durations.append(len(frames) / kwargs.get("video_fps", 2.0))
            else:
                container = av.open(video, "r")
                video_stream = next(stream for stream in container.streams if stream.type == "video")
                sample_indices = self._get_video_sample_indices(video_stream, **kwargs)
                container.seek(0)
                for frame_idx, frame in enumerate(container.decode(video_stream)):
                    if frame_idx in sample_indices:
                        frames.append(frame.to_image())
                if video_stream.duration is None:
                    # stream has no duration metadata; fall back to the requested fps
                    fps_per_video.append(kwargs.get("video_fps", 2.0))
                    durations.append(len(frames) / kwargs.get("video_fps", 2.0))
                else:
                    fps_per_video.append(len(sample_indices) / float(video_stream.duration * video_stream.time_base))
                    durations.append(float(video_stream.duration * video_stream.time_base))
            # pad to an even frame count; presumably for temporal_patch_size=2 — TODO confirm
            if len(frames) % 2 != 0:
                frames.append(frames[-1])
            frames = self._regularize_images(frames, **kwargs)["images"]
            results.append(frames)
        return {"videos": results, "fps_per_video": fps_per_video, "durations": durations}
    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
    ) -> dict[str, "torch.Tensor"]:
        r"""Run image/video processors and attach per-video grid timing if supported."""
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        video_processor: BaseVideoProcessor = getattr(processor, "video_processor", None)
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            mm_inputs.update(image_processor(images, return_tensors="pt"))
        if len(videos) != 0:
            video_data = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )
            mm_inputs.update(video_processor(videos=video_data["videos"], return_tensors="pt"))
            temporal_patch_size: int = getattr(image_processor, "temporal_patch_size", 2)
            if "second_per_grid_ts" in processor.model_input_names:
                # seconds covered by one temporal grid step, derived from sampled fps
                mm_inputs["second_per_grid_ts"] = [temporal_patch_size / fps for fps in video_data["fps_per_video"]]
        return mm_inputs
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Replace image/video placeholders with vision-token runs sized from grid_thw."""
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens = 0, 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        merge_length: int = getattr(image_processor, "merge_size") ** 2
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_thw = mm_inputs.get("image_grid_thw", [])
            video_grid_thw = mm_inputs.get("video_grid_thw", [])
        else:
            image_grid_thw = [None] * len(images)
            video_grid_thw = [None] * len(videos)
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                # tokens per image = prod(t, h, w) / merge_size^2
                image_seqlen = image_grid_thw[num_image_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.image_token * image_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_image_tokens += 1
            while VIDEO_PLACEHOLDER in content:
                video_seqlen = video_grid_thw[num_video_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    VIDEO_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.video_token * video_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_video_tokens += 1
            message["content"] = content
        return messages
@dataclass
class Qwen3VLPlugin(Qwen2VLPlugin):
    r"""Multimodal plugin for Qwen3-VL models.

    Extends Qwen2-VL by interleaving per-frame timestamps into the expanded
    video token structure, using the video processor's returned metadata.
    """
    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
    ) -> dict[str, "torch.Tensor"]:
        r"""Run image/video processors, passing per-video metadata and requesting it back."""
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        video_processor: BaseImageProcessor = getattr(processor, "video_processor", None)
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            mm_inputs.update(image_processor(images, return_tensors="pt"))
        if len(videos) != 0:
            # NOTE: `videos` is rebound to the regularized dict from here on
            videos = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )
            # NOTE(review): metadata fps defaults to 24.0 while the processor call
            # below uses 2.0 — confirm this asymmetry is intended
            video_metadata = [
                {"fps": getattr(processor, "video_fps", 24.0), "duration": duration, "total_num_frames": len(video)}
                for video, duration in zip(videos["videos"], videos["durations"])
            ]
            mm_inputs.update(
                video_processor(
                    videos=videos["videos"],
                    video_metadata=video_metadata,
                    fps=getattr(processor, "video_fps", 2.0),
                    return_metadata=True,
                )
            )
            temporal_patch_size: int = getattr(image_processor, "temporal_patch_size", 2)
            if "second_per_grid_ts" in processor.model_input_names:
                mm_inputs["second_per_grid_ts"] = [temporal_patch_size / fps for fps in videos["fps_per_video"]]
        return mm_inputs
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Expand placeholders; each video frame is prefixed with its timestamp in seconds."""
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens = 0, 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        video_processor: BaseImageProcessor = getattr(processor, "video_processor")
        image_merge_length: int = getattr(image_processor, "merge_size") ** 2
        video_merge_length: int = getattr(video_processor, "merge_size") ** 2
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_thw = mm_inputs.get("image_grid_thw", [])
            video_grid_thw = mm_inputs.get("video_grid_thw", [])
            num_frames = video_grid_thw[0][0] if len(video_grid_thw) > 0 else 0  # hard code for now
            video_metadata = mm_inputs.get("video_metadata", {})
        else:
            image_grid_thw = [None] * len(images)
            video_grid_thw = [None] * len(videos)
            num_frames = 0
            timestamps = [0]
        for idx, message in enumerate(messages):
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = (
                    image_grid_thw[num_image_tokens].prod() // image_merge_length if self.expand_mm_tokens else 1
                )
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.image_token * image_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_image_tokens += 1
            while VIDEO_PLACEHOLDER in content:
                if self.expand_mm_tokens:
                    # NOTE(review): metadata is indexed by the message index `idx`,
                    # not by `num_video_tokens` — verify for multi-video inputs
                    metadata = video_metadata[idx]
                    timestamps = processor._calculate_timestamps(
                        metadata.frames_indices,
                        metadata.fps,
                        video_processor.merge_size,
                    )
                    video_structure = ""
                    for frame_index in range(num_frames):
                        # tokens per frame = prod(h, w) / merge_size^2 (temporal dim excluded)
                        video_seqlen = (
                            video_grid_thw[num_video_tokens][1:].prod() // video_merge_length
                            if self.expand_mm_tokens
                            else 1
                        )
                        timestamp_sec = timestamps[frame_index]
                        frame_structure = (
                            f"<{timestamp_sec:.1f} seconds>"
                            f"{self.vision_bos_token}{self.video_token * video_seqlen}{self.vision_eos_token}"
                        )
                        video_structure += frame_structure
                else:
                    video_structure = f"{self.vision_bos_token}{self.video_token}{self.vision_eos_token}"
                content = content.replace(VIDEO_PLACEHOLDER, video_structure, 1)
                num_video_tokens += 1
            message["content"] = content
        return messages
@dataclass
class GLM4VPlugin(Qwen2VLPlugin):
    r"""Multimodal plugin for GLM-4V models.

    Uses GLM-specific begin/end-of-image and begin/end-of-video markers and
    appends a timestamp after each expanded video frame.
    """
    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
    ) -> dict[str, "torch.Tensor"]:
        r"""Run image/video processors, passing per-video metadata to the video processor."""
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        video_processor: BaseImageProcessor = getattr(processor, "video_processor", None)
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            mm_inputs.update(image_processor(images, return_tensors="pt"))
        if len(videos) != 0:
            video_data = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )
            # prepare video metadata
            video_metadata = [
                {"fps": 2, "duration": duration, "total_frames": len(video)}
                for video, duration in zip(video_data["videos"], video_data["durations"])
            ]
            mm_inputs.update(video_processor(images=None, videos=video_data["videos"], video_metadata=video_metadata))
        return mm_inputs
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Expand placeholders with GLM-4V markers; video frames carry trailing timestamps."""
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens = 0, 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        merge_length: int = getattr(image_processor, "merge_size") ** 2
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_thw = mm_inputs.get("image_grid_thw", [])
            video_grid_thw = mm_inputs.get("video_grid_thw", [])
            num_frames = video_grid_thw[0][0] if len(video_grid_thw) > 0 else 0  # hard code for now
            timestamps = mm_inputs.get("timestamps", [])
            # normalize timestamps into a flat list regardless of tensor/nested-list form
            if hasattr(timestamps, "tolist"):
                timestamps = timestamps.tolist()
            if not timestamps:
                timestamps_list = []
            elif isinstance(timestamps[0], list):
                timestamps_list = timestamps[0]
            else:
                timestamps_list = timestamps
            unique_timestamps = timestamps_list.copy()
            selected_timestamps = unique_timestamps[:num_frames]
            # pad with the last timestamp (or 0) so every frame has one
            while len(selected_timestamps) < num_frames:
                selected_timestamps.append(selected_timestamps[-1] if selected_timestamps else 0)
        else:
            image_grid_thw = [None] * len(images)
            video_grid_thw = [None] * len(videos)
            num_frames = 0
            selected_timestamps = [0]
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = image_grid_thw[num_image_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER, f"<|begin_of_image|>{self.image_token * image_seqlen}<|end_of_image|>", 1
                )
                num_image_tokens += 1
            while VIDEO_PLACEHOLDER in content:
                video_structure = ""
                for frame_index in range(num_frames):
                    # tokens per frame = prod(h, w) / merge_size^2 (temporal dim excluded)
                    video_seqlen = (
                        video_grid_thw[num_video_tokens][1:].prod() // merge_length if self.expand_mm_tokens else 1
                    )
                    timestamp_sec = selected_timestamps[frame_index]
                    frame_structure = (
                        f"<|begin_of_image|>{self.image_token * video_seqlen}<|end_of_image|>{timestamp_sec}"
                    )
                    video_structure += frame_structure
                if not self.expand_mm_tokens:
                    video_structure = self.video_token
                content = content.replace(VIDEO_PLACEHOLDER, f"<|begin_of_video|>{video_structure}<|end_of_video|>", 1)
                num_video_tokens += 1
            message["content"] = content
        return messages
    @override
    def get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        imglens: list[int],
        vidlens: list[int],
        audlens: list[int],
        batch_ids: list[list[int]],
        processor: Optional["ProcessorMixin"],
    ) -> dict[str, Union[list[int], "torch.Tensor"]]:
        r"""Build batched model inputs; ``timestamps`` is internal-only and dropped."""
        self._validate_input(processor, images, videos, audios)
        mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
        mm_inputs.pop("timestamps", None)  # not a model input; only used during message expansion
        return mm_inputs
@dataclass
class Qwen2OmniPlugin(Qwen2VLPlugin):
    r"""Multimodal plugin for Qwen2-Omni models (image + video + audio).

    Supports the "audio in video" mode where each video placeholder is expanded
    together with its paired audio into interleaved, time-aligned token chunks.
    """
    audio_bos_token: str = "<|audio_start|>"
    audio_eos_token: str = "<|audio_end|>"
    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
    ) -> dict[str, "torch.Tensor"]:
        r"""Run image/video/audio processors and collect their tensors.

        Renames the audio ``attention_mask`` to ``feature_attention_mask`` to
        avoid clashing with the text attention mask.
        """
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        video_processor: BaseVideoProcessor = getattr(processor, "video_processor", None)
        # the audio extractor attribute name differs across processor versions
        feature_extractor: SequenceFeatureExtractor = getattr(processor, "feature_extractor", None) or getattr(
            processor, "audio_processor", None
        )
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            mm_inputs.update(image_processor(images, return_tensors="pt"))
        if len(videos) != 0:
            video_dict = self._regularize_videos(
                videos,
                image_max_pixels=getattr(processor, "video_max_pixels", 256 * 256),
                image_min_pixels=getattr(processor, "video_min_pixels", 16 * 16),
                video_fps=getattr(processor, "video_fps", 2.0),
                video_maxlen=getattr(processor, "video_maxlen", 128),
            )
            mm_inputs.update(video_processor(videos=video_dict["videos"], return_tensors="pt"))
            temporal_patch_size: int = getattr(image_processor, "temporal_patch_size", 2)
            # seconds covered by one temporal grid step, per video
            mm_inputs["video_second_per_grid"] = torch.tensor(
                [temporal_patch_size / fps for fps in video_dict["fps_per_video"]]
            )
        if len(audios) != 0:
            audios = self._regularize_audios(
                audios,
                sampling_rate=getattr(processor, "audio_sampling_rate", 16000),
            )["audios"]
            mm_inputs.update(
                feature_extractor(
                    audios,
                    sampling_rate=getattr(processor, "audio_sampling_rate", 16000),
                    return_attention_mask=True,
                    padding="max_length",
                    return_tensors="pt",
                )
            )
            mm_inputs["feature_attention_mask"] = mm_inputs.pop("attention_mask")  # prevent conflicts
        return mm_inputs
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Expand image/video/audio placeholders into model token sequences.

        When ``use_audio_in_video`` is set and both videos and audios are
        present, each video placeholder must be followed by an audio
        placeholder; the pair is expanded into time-aligned interleaved chunks
        of video and audio tokens.
        """
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens, num_audio_tokens = 0, 0, 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        merge_length = processor.image_processor.merge_size**2
        use_audio_in_video = getattr(processor, "use_audio_in_video", False)
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            image_grid_thw = mm_inputs.get("image_grid_thw", [])
            video_grid_thw = mm_inputs.get("video_grid_thw", [])
            if "feature_attention_mask" in mm_inputs:
                if processor.__class__.__name__ == "Qwen3OmniMoeProcessor":  # for qwen3omni
                    # qwen3omni chunks audio in blocks of 100 frames; presumably each
                    # full block contributes 13 tokens — TODO confirm against upstream
                    input_lengths = mm_inputs["feature_attention_mask"].sum(-1)
                    input_lengths_leave = input_lengths % 100
                    feature_lengths = (input_lengths_leave - 1) // 2 + 1
                    audio_lengths = ((feature_lengths - 1) // 2 + 1 - 1) // 2 + 1 + (input_lengths // 100) * 13
                else:
                    input_lengths = (mm_inputs["feature_attention_mask"].sum(-1).numpy() - 1) // 2 + 1
                    audio_lengths = (input_lengths - 2) // 2 + 1
        else:
            mm_inputs = {}
            image_grid_thw = [None] * len(images)
            video_grid_thw = [None] * len(videos)
            audio_lengths = [None] * len(audios)
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                image_seqlen = image_grid_thw[num_image_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                content = content.replace(
                    IMAGE_PLACEHOLDER,
                    f"{self.vision_bos_token}{self.image_token * image_seqlen}{self.vision_eos_token}",
                    1,
                )
                num_image_tokens += 1
            if (
                use_audio_in_video and len(audios) and len(videos)
            ):  # if use the audio of video # deal video token and audio token togather
                if len(videos) != len(audios):
                    raise ValueError(
                        f"Number of videos ({len(videos)}) must match number of audios ({len(audios)}) when using audio in video."
                    )
                while VIDEO_PLACEHOLDER in content:
                    # each video placeholder must be immediately paired with an audio placeholder
                    video_pos = content.find(VIDEO_PLACEHOLDER)
                    audio_pos = content.find(AUDIO_PLACEHOLDER, video_pos)
                    if audio_pos == -1 or audio_pos < video_pos:
                        raise ValueError(
                            f"Each {VIDEO_PLACEHOLDER} must be followed by an {AUDIO_PLACEHOLDER} when using audio in video."
                        )
                    position_id_per_seconds: int = getattr(processor, "position_id_per_seconds", 25)
                    audio_t_index = torch.arange(audio_lengths[num_audio_tokens])
                    # per-token temporal positions for the video, scaled to position ids
                    video_t_index = (
                        torch.arange(video_grid_thw[num_video_tokens][0])
                        .view(-1, 1, 1)
                        .expand(
                            -1,
                            video_grid_thw[num_video_tokens][1] // image_processor.merge_size,
                            video_grid_thw[num_video_tokens][2] // image_processor.merge_size,
                        )
                        .flatten()
                        * mm_inputs["video_second_per_grid"][num_video_tokens]
                        * position_id_per_seconds
                    ).long()
                    t_ntoken_per_chunk = position_id_per_seconds * 2  # chunk = 2 seconds of positions
                    video_chunk_indices = processor.get_chunked_index(video_t_index, t_ntoken_per_chunk)
                    audio_chunk_indices = processor.get_chunked_index(audio_t_index, t_ntoken_per_chunk)
                    # interleave: for each time chunk, emit its video tokens then its audio tokens
                    placeholder_string = ""
                    placeholder_string += self.vision_bos_token + self.audio_bos_token
                    for j in range(max(len(video_chunk_indices), len(audio_chunk_indices))):
                        video_chunk_index = video_chunk_indices[j] if j < len(video_chunk_indices) else None
                        audio_chunk_index = audio_chunk_indices[j] if j < len(audio_chunk_indices) else None
                        if video_chunk_index is not None:
                            placeholder_string += self.video_token * (video_chunk_index[1] - video_chunk_index[0])
                        if audio_chunk_index is not None:
                            placeholder_string += self.audio_token * (audio_chunk_index[1] - audio_chunk_index[0])
                    placeholder_string += self.audio_eos_token + self.vision_eos_token
                    content = content.replace(VIDEO_PLACEHOLDER, placeholder_string, 1)
                    content = content.replace(AUDIO_PLACEHOLDER, "", 1)
                    num_audio_tokens += 1
                    num_video_tokens += 1
            else:
                while AUDIO_PLACEHOLDER in content:
                    audio_seqlen = audio_lengths[num_audio_tokens] if self.expand_mm_tokens else 1
                    content = content.replace(
                        AUDIO_PLACEHOLDER,
                        f"{self.audio_bos_token}{self.audio_token * audio_seqlen}{self.audio_eos_token}",
                        1,
                    )
                    num_audio_tokens += 1
                while VIDEO_PLACEHOLDER in content:
                    video_seqlen = (
                        video_grid_thw[num_video_tokens].prod() // merge_length if self.expand_mm_tokens else 1
                    )
                    content = content.replace(
                        VIDEO_PLACEHOLDER,
                        f"{self.vision_bos_token}{self.video_token * video_seqlen}{self.vision_eos_token}",
                        1,
                    )
                    num_video_tokens += 1
            message["content"] = content
        return messages
@dataclass
class VideoLlavaPlugin(BasePlugin):
    r"""Multimodal plugin for Video-LLaVA models.

    Expands image/video placeholders into token runs sized from the processed
    pixel dimensions and frame count.
    """
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Replace placeholders with image/video tokens computed from patch grid and frames."""
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens, num_video_tokens = 0, 0
        messages = deepcopy(messages)
        num_frames = 0
        if self.expand_mm_tokens:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            if "pixel_values_images" in mm_inputs:
                height, width = get_image_size(to_numpy_array(mm_inputs["pixel_values_images"][0]))
                num_frames = 1
            if "pixel_values_videos" in mm_inputs:
                one_video = to_numpy_array(mm_inputs["pixel_values_videos"][0])
                height, width = get_image_size(one_video[0])
                num_frames = one_video.shape[0]  # frame dim is always after batch dim
            if "pixel_values_images" in mm_inputs or "pixel_values_videos" in mm_inputs:
                image_seqlen = (height // processor.patch_size) * (
                    width // processor.patch_size
                ) + processor.num_additional_image_tokens
                # NOTE(review): video_seqlen is computed before the default-strategy
                # adjustment below, so videos keep the extra token per frame — confirm
                # this matches the upstream Video-LLaVA processor
                video_seqlen = image_seqlen * num_frames
                if processor.vision_feature_select_strategy == "default":
                    image_seqlen -= 1
            else:
                image_seqlen, video_seqlen = 1, 1
        for message in messages:
            content = message["content"]
            # use intermediate markers so already-inserted tokens are not re-expanded
            while IMAGE_PLACEHOLDER in content:
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}" * image_seqlen, 1)
                num_image_tokens += 1
            while VIDEO_PLACEHOLDER in content:
                content = content.replace(VIDEO_PLACEHOLDER, "{{video}}" * video_seqlen, 1)
                num_video_tokens += 1
            content = content.replace("{{image}}", self.image_token)
            message["content"] = content.replace("{{video}}", self.video_token)
        return messages
@dataclass
class LFMVLPlugin(BasePlugin):
    r"""Multimodal plugin for LFM2-VL models (image only).

    Expands each image placeholder into a token run sized from the image's
    spatial shape divided by the processor's downsample factor.
    """
    @override
    def _get_mm_inputs(
        self,
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: "MMProcessor",
    ) -> dict[str, "torch.Tensor"]:
        r"""Run the image processor over regularized images; videos/audios are unsupported."""
        image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
        mm_inputs = {}
        if len(images) != 0:
            images = self._regularize_images(
                images,
                image_max_pixels=getattr(processor, "image_max_pixels", 768 * 768),
                image_min_pixels=getattr(processor, "image_min_pixels", 32 * 32),
            )["images"]
            mm_inputs.update(image_processor(images, return_tensors="pt"))
        return mm_inputs
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Replace image placeholders with token runs of (h * w) / downsample_factor^2 tokens."""
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        num_image_tokens = 0
        messages = deepcopy(messages)
        image_processor: BaseImageProcessor = getattr(processor, "image_processor")
        downsample_factor: int = getattr(image_processor, "downsample_factor", 2)
        if self.expand_mm_tokens and len(images) > 0:
            mm_inputs = self._get_mm_inputs(images, videos, audios, processor)
            spatial_shapes = mm_inputs.get("spatial_shapes", [])
        else:
            spatial_shapes = []
        for message in messages:
            content = message["content"]
            while IMAGE_PLACEHOLDER in content:
                if self.expand_mm_tokens and len(spatial_shapes) > num_image_tokens:
                    h, w = spatial_shapes[num_image_tokens].tolist()
                    image_seqlen = (h * w) // (downsample_factor * downsample_factor)
                else:
                    image_seqlen = 1  # fall back to one token when shapes are unavailable
                # intermediate marker avoids re-expanding tokens already inserted
                content = content.replace(IMAGE_PLACEHOLDER, "{{image}}" * image_seqlen, 1)
                num_image_tokens += 1
            message["content"] = content.replace("{{image}}", self.image_token)
        return messages
@dataclass
class YoutuVLPlugin(BasePlugin):
    r"""Multimodal plugin for Youtu-VL models.

    Wraps each image/video placeholder with vision BOS/EOS tokens around a
    single media token; no token expansion is performed here.
    """
    vision_bos_token: str = "<|vision_start|>"
    vision_eos_token: str = "<|vision_end|>"
    @override
    def process_messages(
        self,
        messages: list[dict[str, str]],
        images: list["ImageInput"],
        videos: list["VideoInput"],
        audios: list["AudioInput"],
        processor: Optional["MMProcessor"],
    ) -> list[dict[str, str]]:
        r"""Replace all image/video placeholders with BOS + media token + EOS."""
        self._validate_input(processor, images, videos, audios)
        self._validate_messages(messages, images, videos, audios)
        messages = deepcopy(messages)
        for message in messages:
            content = message["content"]
            # replace every occurrence at once (no per-media sizing needed)
            content = content.replace(
                IMAGE_PLACEHOLDER, f"{self.vision_bos_token}{self.image_token}{self.vision_eos_token}"
            )
            content = content.replace(
                VIDEO_PLACEHOLDER, f"{self.vision_bos_token}{self.video_token}{self.vision_eos_token}"
            )
            message["content"] = content
        return messages
# Registry mapping plugin names (as used in template definitions) to their
# implementing classes; extend via `register_mm_plugin`.
PLUGINS = {
    "base": BasePlugin,
    "ernie_vl": ErnieVLPlugin,
    "gemma3": Gemma3Plugin,
    "glm4v": GLM4VPlugin,
    "gemma3n": Gemma3nPlugin,
    "intern_vl": InternVLPlugin,
    "kimi_vl": KimiVLPlugin,
    "llama4": Llama4Plugin,
    "llava": LlavaPlugin,
    "llava_next": LlavaNextPlugin,
    "llava_next_video": LlavaNextVideoPlugin,
    "lfm2_vl": LFMVLPlugin,
    "minicpm_v": MiniCPMVPlugin,
    "mllama": MllamaPlugin,
    "paligemma": PaliGemmaPlugin,
    "pixtral": PixtralPlugin,
    "qwen2_audio": Qwen2AudioPlugin,
    "qwen2_omni": Qwen2OmniPlugin,
    "qwen2_vl": Qwen2VLPlugin,
    "qwen3_vl": Qwen3VLPlugin,
    "video_llava": VideoLlavaPlugin,
    "youtu_vl": YoutuVLPlugin,
}
def register_mm_plugin(name: str, plugin_class: type["BasePlugin"]) -> None:
    r"""Register a multimodal plugin class under a unique name.

    Args:
        name: Registry key for the plugin.
        plugin_class: The plugin class to register.

    Raises:
        ValueError: If a plugin with the same name is already registered.
    """
    if PLUGINS.get(name) is not None:
        raise ValueError(f"Multimodal plugin {name} already exists.")
    PLUGINS[name] = plugin_class
def get_mm_plugin(
    name: str,
    image_token: str | None = None,
    video_token: str | None = None,
    audio_token: str | None = None,
    **kwargs,
) -> "BasePlugin":
    r"""Instantiate the multimodal plugin registered under ``name``.

    Args:
        name: Registry key of the plugin.
        image_token: Token that represents an image in the template.
        video_token: Token that represents a video in the template.
        audio_token: Token that represents an audio clip in the template.
        **kwargs: Extra keyword arguments forwarded to the plugin constructor.

    Raises:
        ValueError: If no plugin with the given name is registered.
    """
    try:
        plugin_class = PLUGINS[name]
    except KeyError:
        raise ValueError(f"Multimodal plugin `{name}` not found.") from None
    return plugin_class(image_token, video_token, audio_token, **kwargs)
def _get_paligemma_token_type_ids(imglens: list[int], seqlens: list[int], processor: "MMProcessor") -> list[list[int]]:
+ r"""Get paligemma token type ids for computing loss.
+
+ It is slightly different with the original token type ids where the prompt part is 0.
+
+ Returns:
+ batch_token_type_ids: shape (batch_size, seq_length)
+
+ """
batch_token_type_ids = []
for imglen, seqlen in zip(imglens, seqlens):
image_seqlen = imglen * processor.image_seq_length
@@ -102,6 +110,12 @@
def _get_gemma3_token_type_ids(batch_ids: list[list[int]], processor: "MMProcessor"):
+ r"""Get gemma3 token type ids for computing loss.
+
+ Returns:
+ batch_token_type_ids: shape (batch_size, seq_length)
+
+ """
image_token_id: int = getattr(processor, "image_token_id")
batch_token_type_ids = []
for token_ids in batch_ids:
@@ -114,6 +128,7 @@
def _make_batched_images(images: list["ImageObject"], imglens: list[int]) -> list[list["ImageObject"]]:
+ r"""Make nested list of images."""
batch_images = []
for imglen in imglens:
batch_images.append(images[:imglen])
@@ -123,6 +138,7 @@
def _check_video_is_nested_images(video: "VideoInput") -> bool:
+ r"""Check if the video is nested images."""
return isinstance(video, list) and all(isinstance(frame, (str, BinaryIO, dict, ImageObject)) for frame in video)
@@ -140,6 +156,7 @@ videos: list["VideoInput"],
audios: list["AudioInput"],
) -> None:
+ r"""Validate if this model accepts the input modalities."""
image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
video_processor: BaseImageProcessor = getattr(
processor, "video_processor", getattr(processor, "image_processor", None)
@@ -181,6 +198,7 @@ videos: list["VideoInput"],
audios: list["AudioInput"],
):
+ r"""Validate if the number of images, videos and audios match the number of placeholders in messages."""
num_image_tokens, num_video_tokens, num_audio_tokens = 0, 0, 0
for message in messages:
num_image_tokens += message["content"].count(IMAGE_PLACEHOLDER)
@@ -205,6 +223,7 @@ def _preprocess_image(
self, image: "ImageObject", image_max_pixels: int, image_min_pixels: int, **kwargs
) -> "ImageObject":
+ r"""Pre-process a single image."""
if (image.width * image.height) > image_max_pixels:
resize_factor = math.sqrt(image_max_pixels / (image.width * image.height))
width, height = int(image.width * resize_factor), int(image.height * resize_factor)
@@ -223,6 +242,7 @@ def _get_video_sample_indices(
self, video_stream: "Stream", video_fps: float, video_maxlen: int, **kwargs
) -> list[int]:
+ r"""Compute video sample indices according to fps."""
total_frames = video_stream.frames
if total_frames == 0: # infinite video
return np.linspace(0, video_maxlen - 1, video_maxlen).astype(np.int32)
@@ -232,6 +252,7 @@ return np.linspace(0, total_frames - 1, sample_frames).astype(np.int32)
def _regularize_images(self, images: list["ImageInput"], **kwargs) -> "RegularizedImageOutput":
+ r"""Regularize images to avoid error. Including reading and pre-processing."""
results = []
for image in images:
if isinstance(image, (str, BinaryIO)):
@@ -252,6 +273,7 @@ return {"images": results}
def _regularize_videos(self, videos: list["VideoInput"], **kwargs) -> "RegularizedVideoOutput":
+ r"""Regularizes videos to avoid error. Including reading, resizing and converting."""
results = []
durations = []
for video in videos:
@@ -284,6 +306,7 @@ def _regularize_audios(
self, audios: list["AudioInput"], sampling_rate: float, **kwargs
) -> "RegularizedAudioOutput":
+ r"""Regularizes audios to avoid error. Including reading and resampling."""
results, sampling_rates = [], []
for audio in audios:
if not isinstance(audio, np.ndarray):
@@ -309,6 +332,25 @@ processor: "MMProcessor",
imglens: list[int] | None = None,
) -> dict[str, "torch.Tensor"]:
+ r"""Process visual inputs.
+
+ Returns: (llava and paligemma)
+ pixel_values: tensor with shape (B, C, H, W)
+
+ Returns: (qwen2-vl)
+ pixel_values: tensor with shape (num_patches, patch_dim)
+ image_grid_thw: tensor with shape (num_images, 3), where the three numbers are time, width, height
+ where num_patches == torch.prod(image_grid_thw)
+
+ Returns: (mllama)
+ pixel_values: tensor with shape
+ (batch_size, max_num_images, max_image_tiles, channels, tile_height, tile_width)
+ For example, (2, 1, 4, 3, 560, 560).
+ aspect_ratio_ids: tensor with shape (batch_size, max_num_images). For example, (2, 1).
+ aspect_ratio_mask: tensor with shape (batch_size, max_num_images, max_image_tiles). For example, (2, 1, 4).
+ num_tiles: List[List[int]] with shape (batch_size, num_images_in_batch). For example, (2, 1).
+
+ """
mm_inputs = {}
if len(images) != 0:
image_processor: BaseImageProcessor = getattr(processor, "image_processor", None)
@@ -381,6 +423,7 @@ audios: list["AudioInput"],
processor: Optional["MMProcessor"],
) -> list[dict[str, str]]:
+ r"""Pre-process input messages before tokenization for VLMs."""
self._validate_input(processor, images, videos, audios)
return messages
@@ -394,6 +437,7 @@ tokenizer: "PreTrainedTokenizer",
processor: Optional["MMProcessor"],
) -> tuple[list[int], list[int] | None]:
+ r"""Pre-process token ids after tokenization for VLMs."""
self._validate_input(processor, images, videos, audios)
return input_ids, labels
@@ -408,6 +452,19 @@ batch_ids: list[list[int]],
processor: Optional["MMProcessor"],
) -> dict[str, Union[list[int], "torch.Tensor"]]:
+ r"""Build batched multimodal inputs for VLMs.
+
+ Arguments:
+ images: a list of image inputs, shape (num_images,)
+ videos: a list of video inputs, shape (num_videos,)
+ audios: a list of audio inputs, shape (num_audios,)
+ imglens: number of images in each sample, shape (batch_size,)
+ vidlens: number of videos in each sample, shape (batch_size,)
+ audlens: number of audios in each sample, shape (batch_size,)
+ batch_ids: token ids of input samples, shape (batch_size, seq_len)
+ processor: a processor for pre-processing images and videos
+
+ """
self._validate_input(processor, images, videos, audios)
return self._get_mm_inputs(images, videos, audios, processor)
@@ -2046,6 +2103,12 @@
@dataclass
class LFMVLPlugin(BasePlugin):
+ r"""Plugin for LFM2.5-VL vision-language models.
+
+ LFM2.5-VL uses dynamic image token counts based on image resolution.
+ The image processor returns spatial_shapes tensor with [height, width] grid dimensions.
+ Token count per image = (spatial_h * spatial_w) / (downsample_factor^2)
+ """
@override
def _get_mm_inputs(
@@ -2107,6 +2170,7 @@
@dataclass
class YoutuVLPlugin(BasePlugin):
+ r"""Plugin for Youtu-VL vision-language models."""
vision_bos_token: str = "<|vision_start|>"
vision_eos_token: str = "<|vision_end|>"
@@ -2165,6 +2229,7 @@
def register_mm_plugin(name: str, plugin_class: type["BasePlugin"]) -> None:
+ r"""Register a multimodal plugin."""
if name in PLUGINS:
raise ValueError(f"Multimodal plugin {name} already exists.")
@@ -2178,7 +2243,8 @@ audio_token: str | None = None,
**kwargs,
) -> "BasePlugin":
+ r"""Get plugin for multimodal inputs."""
if name not in PLUGINS:
raise ValueError(f"Multimodal plugin `{name}` not found.")
- return PLUGINS[name](image_token, video_token, audio_token, **kwargs)+ return PLUGINS[name](image_token, video_token, audio_token, **kwargs)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/data/mm_plugin.py |
Create documentation for each function signature | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from enum import StrEnum, unique
from typing import TYPE_CHECKING, Any, Optional, TypedDict, Union
import fsspec
from datasets import DatasetDict, concatenate_datasets, interleave_datasets
from ..extras import logging
if TYPE_CHECKING:
from datasets import Dataset, IterableDataset
from ..hparams import DataArguments
logger = logging.get_logger(__name__)
SLOTS = list[Union[str, set[str], dict[str, str]]]
@unique
class Role(StrEnum):
USER = "user"
ASSISTANT = "assistant"
SYSTEM = "system"
FUNCTION = "function"
OBSERVATION = "observation"
class DatasetModule(TypedDict):
train_dataset: Optional[Union["Dataset", "IterableDataset"]]
eval_dataset: Optional[Union["Dataset", "IterableDataset", dict[str, "Dataset"]]]
def merge_dataset(
all_datasets: list[Union["Dataset", "IterableDataset"]], data_args: "DataArguments", seed: int
) -> Union["Dataset", "IterableDataset"]:
if len(all_datasets) == 1:
return all_datasets[0]
elif data_args.mix_strategy == "concat":
if data_args.streaming:
logger.warning_rank0_once("The samples between different datasets will not be mixed in streaming mode.")
return concatenate_datasets(all_datasets)
elif data_args.mix_strategy.startswith("interleave"):
if not data_args.streaming:
logger.warning_rank0_once("We recommend using `mix_strategy=concat` in non-streaming mode.")
strategy_map: str = {
"interleave_under": "first_exhausted",
"interleave_over": "all_exhausted",
"interleave_once": "all_exhausted_without_replacement",
}[data_args.mix_strategy]
return interleave_datasets(
datasets=all_datasets,
probabilities=data_args.interleave_probs,
seed=seed,
stopping_strategy=strategy_map, # type: ignore
)
else:
raise ValueError(f"Unknown mixing strategy: {data_args.mix_strategy}.")
def split_dataset(
dataset: Optional[Union["Dataset", "IterableDataset"]],
eval_dataset: Optional[Union["Dataset", "IterableDataset", dict[str, "Dataset"]]],
data_args: "DataArguments",
seed: int,
) -> tuple[dict, dict]:
if eval_dataset is not None and data_args.val_size > 1e-6:
raise ValueError("Cannot specify `val_size` if `eval_dataset` is not None.")
# the train and eval better to in dict dtype and separately return for cpode clearly and good handle outside
train_dict, eval_dict = {}, {}
if dataset is not None:
if data_args.streaming:
dataset = dataset.shuffle(buffer_size=data_args.buffer_size, seed=seed)
if data_args.val_size > 1e-6:
if data_args.streaming:
eval_dict["validation"] = dataset.take(int(data_args.val_size))
train_dict["train"] = dataset.skip(int(data_args.val_size))
else:
val_size = int(data_args.val_size) if data_args.val_size > 1 else data_args.val_size
split_result = dataset.train_test_split(test_size=val_size, seed=seed)
train_dict["train"] = split_result["train"]
eval_dict["validation"] = split_result["test"]
else:
train_dict["train"] = dataset
if eval_dataset is not None:
if isinstance(eval_dataset, dict):
for name, data in eval_dataset.items():
eval_dict[f"validation_{name}"] = data
else:
if data_args.streaming:
eval_dataset = eval_dataset.shuffle(buffer_size=data_args.buffer_size, seed=seed)
eval_dict["validation"] = eval_dataset
return train_dict, eval_dict
def get_dataset_module(dataset: Union["Dataset", "DatasetDict"]) -> "DatasetModule":
dataset_module: DatasetModule = {}
if isinstance(dataset, DatasetDict): # dataset dict
if "train" in dataset:
dataset_module["train_dataset"] = dataset["train"]
if "validation" in dataset:
dataset_module["eval_dataset"] = dataset["validation"]
else:
eval_dataset = {}
for key in dataset.keys():
if key.startswith("validation_"):
eval_dataset[key[len("validation_") :]] = dataset[key]
if len(eval_dataset):
dataset_module["eval_dataset"] = eval_dataset
else: # single dataset
dataset_module["train_dataset"] = dataset
return dataset_module
def setup_fs(path: str, anon: bool = False) -> "fsspec.AbstractFileSystem":
storage_options = {"anon": anon} if anon else {}
if path.startswith("s3://"):
fs = fsspec.filesystem("s3", **storage_options)
elif path.startswith(("gs://", "gcs://")):
fs = fsspec.filesystem("gcs", **storage_options)
else:
raise ValueError(f"Unsupported protocol in path: {path}. Use 's3://' or 'gs://'.")
if not fs.exists(path):
raise ValueError(f"Path does not exist: {path}.")
return fs
def _read_json_with_fs(fs: "fsspec.AbstractFileSystem", path: str) -> list[Any]:
with fs.open(path, "r") as f:
if path.endswith(".jsonl"):
return [json.loads(line) for line in f if line.strip()]
else:
return json.load(f)
def read_cloud_json(cloud_path: str) -> list[Any]:
try:
fs = setup_fs(cloud_path, anon=True) # try with anonymous access first
except Exception:
fs = setup_fs(cloud_path) # try again with credentials
# filter out non-JSON files
files = [x["Key"] for x in fs.listdir(cloud_path)] if fs.isdir(cloud_path) else [cloud_path]
files = list(filter(lambda file: file.endswith(".json") or file.endswith(".jsonl"), files))
if not files:
raise ValueError(f"No JSON/JSONL files found in the specified path: {cloud_path}.")
return sum([_read_json_with_fs(fs, file) for file in files], []) | --- +++ @@ -51,6 +51,7 @@ def merge_dataset(
all_datasets: list[Union["Dataset", "IterableDataset"]], data_args: "DataArguments", seed: int
) -> Union["Dataset", "IterableDataset"]:
+ r"""Merge multiple datasets to a unified dataset."""
if len(all_datasets) == 1:
return all_datasets[0]
@@ -87,6 +88,14 @@ data_args: "DataArguments",
seed: int,
) -> tuple[dict, dict]:
+ r"""Split the dataset and returns two dicts containing train set and validation set.
+
+ Support both map dataset and iterable dataset.
+
+ Returns:
+ train_dict: Dictionary containing training data with key "train"
+ eval_dict: Dictionary containing evaluation data with keys "validation" or "validation_{name}"
+ """
if eval_dataset is not None and data_args.val_size > 1e-6:
raise ValueError("Cannot specify `val_size` if `eval_dataset` is not None.")
@@ -123,6 +132,7 @@
def get_dataset_module(dataset: Union["Dataset", "DatasetDict"]) -> "DatasetModule":
+ r"""Convert dataset or dataset dict to dataset module."""
dataset_module: DatasetModule = {}
if isinstance(dataset, DatasetDict): # dataset dict
if "train" in dataset:
@@ -146,6 +156,7 @@
def setup_fs(path: str, anon: bool = False) -> "fsspec.AbstractFileSystem":
+ r"""Set up a filesystem object based on the path protocol."""
storage_options = {"anon": anon} if anon else {}
if path.startswith("s3://"):
fs = fsspec.filesystem("s3", **storage_options)
@@ -161,6 +172,7 @@
def _read_json_with_fs(fs: "fsspec.AbstractFileSystem", path: str) -> list[Any]:
+ r"""Helper function to read JSON/JSONL files using fsspec."""
with fs.open(path, "r") as f:
if path.endswith(".jsonl"):
return [json.loads(line) for line in f if line.strip()]
@@ -169,6 +181,14 @@
def read_cloud_json(cloud_path: str) -> list[Any]:
+ r"""Read a JSON/JSONL file from cloud storage (S3 or GCS).
+
+ Args:
+ cloud_path: str
+ Cloud path in the format:
+ - 's3://bucket-name/file.json' for AWS S3
+ - 'gs://bucket-name/file.jsonl' or 'gcs://bucket-name/file.jsonl' for Google Cloud Storage
+ """
try:
fs = setup_fs(cloud_path, anon=True) # try with anonymous access first
except Exception:
@@ -180,4 +200,4 @@ if not files:
raise ValueError(f"No JSON/JSONL files found in the specified path: {cloud_path}.")
- return sum([_read_json_with_fs(fs, file) for file in files], [])+ return sum([_read_json_with_fs(fs, file) for file in files], [])
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/data/data_utils.py |
Create simple docstrings for beginners | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
import json
import os
import socket
from typing import TYPE_CHECKING, Any
from urllib.parse import urlparse
from ..extras.misc import is_env_enabled
from ..extras.packages import is_fastapi_available
if is_fastapi_available():
from fastapi import HTTPException, status
if TYPE_CHECKING:
from pydantic import BaseModel
SAFE_MEDIA_PATH = os.environ.get("SAFE_MEDIA_PATH", os.path.join(os.path.dirname(__file__), "safe_media"))
ALLOW_LOCAL_FILES = is_env_enabled("ALLOW_LOCAL_FILES", "1")
def dictify(data: "BaseModel") -> dict[str, Any]:
try: # pydantic v2
return data.model_dump(exclude_unset=True)
except AttributeError: # pydantic v1
return data.dict(exclude_unset=True)
def jsonify(data: "BaseModel") -> str:
try: # pydantic v2
return json.dumps(data.model_dump(exclude_unset=True), ensure_ascii=False)
except AttributeError: # pydantic v1
return data.json(exclude_unset=True, ensure_ascii=False)
def check_lfi_path(path: str) -> None:
if not ALLOW_LOCAL_FILES:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Local file access is disabled.")
try:
os.makedirs(SAFE_MEDIA_PATH, exist_ok=True)
real_path = os.path.realpath(path)
safe_path = os.path.realpath(SAFE_MEDIA_PATH)
if not real_path.startswith(safe_path):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN, detail="File access is restricted to the safe media directory."
)
except Exception:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid or inaccessible file path.")
def check_ssrf_url(url: str) -> None:
try:
parsed_url = urlparse(url)
if parsed_url.scheme not in ["http", "https"]:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Only HTTP/HTTPS URLs are allowed.")
hostname = parsed_url.hostname
if not hostname:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid URL hostname.")
ip_info = socket.getaddrinfo(hostname, parsed_url.port)
ip_address_str = ip_info[0][4][0]
ip = ipaddress.ip_address(ip_address_str)
if not ip.is_global:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="Access to private or reserved IP addresses is not allowed.",
)
except socket.gaierror:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST, detail=f"Could not resolve hostname: {parsed_url.hostname}"
)
except Exception as e:
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"Invalid URL: {e}") | --- +++ @@ -50,6 +50,7 @@
def check_lfi_path(path: str) -> None:
+ """Checks if a given path is vulnerable to LFI. Raises HTTPException if unsafe."""
if not ALLOW_LOCAL_FILES:
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Local file access is disabled.")
@@ -67,6 +68,7 @@
def check_ssrf_url(url: str) -> None:
+ """Checks if a given URL is vulnerable to SSRF. Raises HTTPException if unsafe."""
try:
parsed_url = urlparse(url)
if parsed_url.scheme not in ["http", "https"]:
@@ -91,4 +93,4 @@ status_code=status.HTTP_400_BAD_REQUEST, detail=f"Could not resolve hostname: {parsed_url.hostname}"
)
except Exception as e:
- raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"Invalid URL: {e}")+ raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"Invalid URL: {e}")
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/api/common.py |
Create docstrings for each class method | # Copyright 2025 THUDM and the LlamaFactory team.
#
# This code is inspired by the THUDM's ChatGLM implementation.
# https://github.com/THUDM/ChatGLM-6B/blob/main/cli_demo.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
from collections.abc import AsyncGenerator, Generator
from threading import Thread
from typing import TYPE_CHECKING, Any, Optional
from ..extras.constants import EngineName
from ..extras.misc import torch_gc
from ..hparams import get_infer_args
if TYPE_CHECKING:
from ..data.mm_plugin import AudioInput, ImageInput, VideoInput
from .base_engine import BaseEngine, Response
def _start_background_loop(loop: "asyncio.AbstractEventLoop") -> None:
asyncio.set_event_loop(loop)
loop.run_forever()
class ChatModel:
def __init__(self, args: Optional[dict[str, Any]] = None) -> None:
model_args, data_args, finetuning_args, generating_args = get_infer_args(args)
if model_args.infer_backend == EngineName.HF:
from .hf_engine import HuggingfaceEngine
self.engine: BaseEngine = HuggingfaceEngine(model_args, data_args, finetuning_args, generating_args)
elif model_args.infer_backend == EngineName.VLLM:
try:
from .vllm_engine import VllmEngine
self.engine: BaseEngine = VllmEngine(model_args, data_args, finetuning_args, generating_args)
except ImportError as e:
raise ImportError(
"vLLM not install, you may need to run `pip install vllm`\n"
"or try to use HuggingFace backend: --infer_backend huggingface"
) from e
elif model_args.infer_backend == EngineName.SGLANG:
try:
from .sglang_engine import SGLangEngine
self.engine: BaseEngine = SGLangEngine(model_args, data_args, finetuning_args, generating_args)
except ImportError as e:
raise ImportError(
"SGLang not install, you may need to run `pip install sglang[all]`\n"
"or try to use HuggingFace backend: --infer_backend huggingface"
) from e
elif model_args.infer_backend == EngineName.KT:
try:
from .kt_engine import KTransformersEngine
self.engine: BaseEngine = KTransformersEngine(model_args, data_args, finetuning_args, generating_args)
except ImportError as e:
raise ImportError(
"KTransformers not install, you may need to run `pip install ktransformers`\n"
"or try to use HuggingFace backend: --infer_backend huggingface"
) from e
else:
raise NotImplementedError(f"Unknown backend: {model_args.infer_backend}")
self._loop = asyncio.new_event_loop()
self._thread = Thread(target=_start_background_loop, args=(self._loop,), daemon=True)
self._thread.start()
def chat(
self,
messages: list[dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
images: Optional[list["ImageInput"]] = None,
videos: Optional[list["VideoInput"]] = None,
audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> list["Response"]:
task = asyncio.run_coroutine_threadsafe(
self.achat(messages, system, tools, images, videos, audios, **input_kwargs), self._loop
)
return task.result()
async def achat(
self,
messages: list[dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
images: Optional[list["ImageInput"]] = None,
videos: Optional[list["VideoInput"]] = None,
audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> list["Response"]:
return await self.engine.chat(messages, system, tools, images, videos, audios, **input_kwargs)
def stream_chat(
self,
messages: list[dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
images: Optional[list["ImageInput"]] = None,
videos: Optional[list["VideoInput"]] = None,
audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> Generator[str, None, None]:
generator = self.astream_chat(messages, system, tools, images, videos, audios, **input_kwargs)
while True:
try:
task = asyncio.run_coroutine_threadsafe(generator.__anext__(), self._loop)
yield task.result()
except StopAsyncIteration:
break
async def astream_chat(
self,
messages: list[dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
images: Optional[list["ImageInput"]] = None,
videos: Optional[list["VideoInput"]] = None,
audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> AsyncGenerator[str, None]:
async for new_token in self.engine.stream_chat(
messages, system, tools, images, videos, audios, **input_kwargs
):
yield new_token
def get_scores(
self,
batch_input: list[str],
**input_kwargs,
) -> list[float]:
task = asyncio.run_coroutine_threadsafe(self.aget_scores(batch_input, **input_kwargs), self._loop)
return task.result()
async def aget_scores(
self,
batch_input: list[str],
**input_kwargs,
) -> list[float]:
return await self.engine.get_scores(batch_input, **input_kwargs)
def run_chat() -> None:
if os.name != "nt":
try:
import readline # noqa: F401
except ImportError:
print("Install `readline` for a better experience.")
chat_model = ChatModel()
messages = []
print("Welcome to the CLI application, use `clear` to remove the history, use `exit` to exit the application.")
while True:
try:
query = input("\nUser: ")
except UnicodeDecodeError:
print("Detected decoding error at the inputs, please set the terminal encoding to utf-8.")
continue
except Exception:
raise
if query.strip() == "exit":
break
if query.strip() == "clear":
messages = []
torch_gc()
print("History has been removed.")
continue
messages.append({"role": "user", "content": query})
print("Assistant: ", end="", flush=True)
response = ""
for new_text in chat_model.stream_chat(messages):
print(new_text, end="", flush=True)
response += new_text
print()
messages.append({"role": "assistant", "content": response}) | --- +++ @@ -37,6 +37,12 @@
class ChatModel:
+ r"""General class for chat models. Backed by huggingface or vllm engines.
+
+ Supports both sync and async methods.
+ Sync methods: chat(), stream_chat() and get_scores().
+ Async methods: achat(), astream_chat() and aget_scores().
+ """
def __init__(self, args: Optional[dict[str, Any]] = None) -> None:
model_args, data_args, finetuning_args, generating_args = get_infer_args(args)
@@ -92,6 +98,7 @@ audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> list["Response"]:
+ r"""Get a list of responses of the chat model."""
task = asyncio.run_coroutine_threadsafe(
self.achat(messages, system, tools, images, videos, audios, **input_kwargs), self._loop
)
@@ -107,6 +114,7 @@ audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> list["Response"]:
+ r"""Asynchronously get a list of responses of the chat model."""
return await self.engine.chat(messages, system, tools, images, videos, audios, **input_kwargs)
def stream_chat(
@@ -119,6 +127,7 @@ audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> Generator[str, None, None]:
+ r"""Get the response token-by-token of the chat model."""
generator = self.astream_chat(messages, system, tools, images, videos, audios, **input_kwargs)
while True:
try:
@@ -137,6 +146,7 @@ audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> AsyncGenerator[str, None]:
+ r"""Asynchronously get the response token-by-token of the chat model."""
async for new_token in self.engine.stream_chat(
messages, system, tools, images, videos, audios, **input_kwargs
):
@@ -147,6 +157,7 @@ batch_input: list[str],
**input_kwargs,
) -> list[float]:
+ r"""Get a list of scores of the reward model."""
task = asyncio.run_coroutine_threadsafe(self.aget_scores(batch_input, **input_kwargs), self._loop)
return task.result()
@@ -155,6 +166,7 @@ batch_input: list[str],
**input_kwargs,
) -> list[float]:
+ r"""Asynchronously get a list of scores of the reward model."""
return await self.engine.get_scores(batch_input, **input_kwargs)
@@ -195,4 +207,4 @@ print(new_text, end="", flush=True)
response += new_text
print()
- messages.append({"role": "assistant", "content": response})+ messages.append({"role": "assistant", "content": response})
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/chat/chat_model.py |
Add docstrings for utility scripts | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import os
from typing import Any
from transformers.trainer import TRAINER_STATE_NAME
from . import logging
from .packages import is_matplotlib_available
if is_matplotlib_available():
import matplotlib.figure
import matplotlib.pyplot as plt
logger = logging.get_logger(__name__)
def smooth(scalars: list[float]) -> list[float]:
if len(scalars) == 0:
return []
last = scalars[0]
smoothed = []
weight = 1.8 * (1 / (1 + math.exp(-0.05 * len(scalars))) - 0.5) # a sigmoid function
for next_val in scalars:
smoothed_val = last * weight + (1 - weight) * next_val
smoothed.append(smoothed_val)
last = smoothed_val
return smoothed
def gen_loss_plot(trainer_log: list[dict[str, Any]]) -> "matplotlib.figure.Figure":
plt.close("all")
plt.switch_backend("agg")
fig = plt.figure()
ax = fig.add_subplot(111)
steps, losses = [], []
for log in trainer_log:
if log.get("loss", None):
steps.append(log["current_steps"])
losses.append(log["loss"])
ax.plot(steps, losses, color="#1f77b4", alpha=0.4, label="original")
ax.plot(steps, smooth(losses), color="#1f77b4", label="smoothed")
ax.legend()
ax.set_xlabel("step")
ax.set_ylabel("loss")
return fig
def plot_loss(save_dictionary: str, keys: list[str] = ["loss"]) -> None:
plt.switch_backend("agg")
with open(os.path.join(save_dictionary, TRAINER_STATE_NAME), encoding="utf-8") as f:
data = json.load(f)
for key in keys:
steps, metrics = [], []
for i in range(len(data["log_history"])):
if key in data["log_history"][i]:
steps.append(data["log_history"][i]["step"])
metrics.append(data["log_history"][i][key])
if len(metrics) == 0:
logger.warning_rank0(f"No metric {key} to plot.")
continue
plt.figure()
plt.plot(steps, metrics, color="#1f77b4", alpha=0.4, label="original")
plt.plot(steps, smooth(metrics), color="#1f77b4", label="smoothed")
plt.title(f"training {key} of {save_dictionary}")
plt.xlabel("step")
plt.ylabel(key)
plt.legend()
figure_path = os.path.join(save_dictionary, "training_{}.png".format(key.replace("/", "_")))
plt.savefig(figure_path, format="png", dpi=100)
print("Figure saved at:", figure_path) | --- +++ @@ -32,6 +32,7 @@
def smooth(scalars: list[float]) -> list[float]:
+ r"""EMA implementation according to TensorBoard."""
if len(scalars) == 0:
return []
@@ -46,6 +47,7 @@
def gen_loss_plot(trainer_log: list[dict[str, Any]]) -> "matplotlib.figure.Figure":
+ r"""Plot loss curves in LlamaBoard."""
plt.close("all")
plt.switch_backend("agg")
fig = plt.figure()
@@ -65,6 +67,7 @@
def plot_loss(save_dictionary: str, keys: list[str] = ["loss"]) -> None:
+ r"""Plot loss curves and saves the image."""
plt.switch_backend("agg")
with open(os.path.join(save_dictionary, TRAINER_STATE_NAME), encoding="utf-8") as f:
data = json.load(f)
@@ -89,4 +92,4 @@ plt.legend()
figure_path = os.path.join(save_dictionary, "training_{}.png".format(key.replace("/", "_")))
plt.savefig(figure_path, format="png", dpi=100)
- print("Figure saved at:", figure_path)+ print("Figure saved at:", figure_path)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/extras/ploting.py |
Write beginner-friendly docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import json
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime
from typing import Any, NamedTuple, Union
from typing_extensions import override
class FunctionCall(NamedTuple):
name: str
arguments: str
DEFAULT_TOOL_PROMPT = (
"You have access to the following tools:\n{tool_text}"
"Use the following format if using a tool:\n"
"```\n"
"Action: tool name (one of [{tool_names}])\n"
"Action Input: the input to the tool, in a JSON format representing the kwargs "
"""(e.g. ```{{"input": "hello world", "num_beams": 5}}```)\n"""
"```\n"
)
GLM4_TOOL_PROMPT = (
"你是一个名为 ChatGLM 的人工智能助手。你是基于智谱 AI 公司训练的语言模型 GLM-4 模型开发的,"
"你的任务是针对用户的问题和要求提供适当的答复和支持。\n\n# 可用工具{tool_text}"
)
GLM4_MOE_TOOL_PROMPT = (
"\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\n"
"You are provided with function signatures within <tools></tools> XML tags:\n<tools>{tool_text}"
"\n</tools>\n\nFor each function call, output the function name and arguments within the following XML format:"
"\n<tool_call>{{function-name}}"
"\n<arg_key>{{arg-key-1}}</arg_key>"
"\n<arg_value>{{arg-value-1}}</arg_value>"
"\n<arg_key>{{arg-key-2}}</arg_key>"
"\n<arg_value>{{arg-value-2}}</arg_value>"
"\n...\n</tool_call>\n"
)
LLAMA3_TOOL_PROMPT = (
"Cutting Knowledge Date: December 2023\nToday Date: {date}\n\n"
"You have access to the following functions. To call a function, please respond with JSON for a function call. "
"""Respond in the format {{"name": function name, "parameters": dictionary of argument name and its value}}. """
"Do not use variables.\n\n{tool_text}"
)
MINIMAX_M1_TOOL_PROMPT = (
"You are provided with these tools:\n<tools>\n{tool_text}</tools>\n\n"
"If you need to call tools, please respond with <tool_calls></tool_calls> XML tags, and provide tool-name and "
"json-object of arguments, following the format below:\n<tool_calls>\n"
"""{{"name": <tool-name-1>, "arguments": <args-json-object-1>}}\n...\n</tool_calls>"""
)
MINIMAX_M2_TOOL_PROMPT = (
"\n\n# Tools\n\nYou may call one or more tools to assist with the user query.\n"
"Here are the tools available in JSONSchema format:\n\n<tools>\n{tool_text}</tools>\n\n"
"When making tool calls, use XML format to invoke tools and pass parameters:\n"
"""\n<minimax:tool_call>\n<invoke name="tool-name-1">\n<parameter name="param-key-1">param-value-1</parameter>\n"""
"""<parameter name="param-key-2">param-value-2</parameter>\n...\n</invoke>\n</minimax:tool_call>"""
)
QWEN_TOOL_PROMPT = (
"\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\n"
"You are provided with function signatures within <tools></tools> XML tags:\n<tools>{tool_text}"
"\n</tools>\n\nFor each function call, return a json object with function name and arguments within "
"""<tool_call></tool_call> XML tags:\n<tool_call>\n{{"name": <function-name>, """
""""arguments": <args-json-object>}}\n</tool_call>"""
)
QWEN35_TOOL_PROMPT = (
"\n\n# Tools\n\nYou have access to the following functions:\n\n<tools>{tool_text}"
"\n</tools>\n\nIf you choose to call a function ONLY reply in the following format with NO suffix:\n\n"
"<tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>\nvalue_1\n</parameter>\n"
"<parameter=example_parameter_2>\nThis is the value for the second parameter\nthat can span\nmultiple lines\n"
"</parameter>\n</function>\n</tool_call>\n\n<IMPORTANT>\nReminder:\n"
"- Function calls MUST follow the specified format: "
"an inner <function=...></function> block must be nested within <tool_call></tool_call> XML tags\n"
"- Required parameters MUST be specified\n"
"- You may provide optional reasoning for your function call in natural language "
"BEFORE the function call, but NOT after\n"
"- If there is no function call available, answer the question like normal with your current knowledge "
"and do not tell the user about function calls\n</IMPORTANT>"
)
SEED_TOOL_PROMPT = (
"system\nYou are Doubao, a helpful AI assistant. You may call one or more functions to assist with the user query."
"Tool List:\nYou are authorized to use the following tools (described in JSON Schema format). Before performing "
"any task, you must decide how to call them based on the descriptions and parameters of these tools.{tool_text}\n"
"工具调用请遵循如下格式:\n<seed:tool_call>\n<function=example_function_name>\n<parameter=example_parameter_1>value_1"
"</parameter>\n<parameter=example_parameter_2>This is the value for the second parameter\nthat can span\nmultiple "
"lines</parameter>\n</function>\n</seed:tool_call>\n"
)
LING_TOOL_PROMPT = (
"# Tools\n\nYou may call one or more functions to assist with the user query.\n\n"
"You are provided with function signatures within <tools></tools> XML tags:\n<tools>{tool_text}"
"\n</tools>\n\nFor each function call, return a json object with function name and arguments within "
"""<tool_call></tool_call> XML tags:\n<tool_call>\n{{"name": <function-name>, """
""""arguments": <args-json-object>}}\n</tool_call>"""
)
LFM2_TOOL_PROMPT = "List of tools: <|tool_list_start|>{tool_text}<|tool_list_end|>"
@dataclass
class ToolUtils(ABC):
@staticmethod
@abstractmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
...
@staticmethod
@abstractmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
...
@staticmethod
@abstractmethod
def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
...
class DefaultToolUtils(ToolUtils):
@override
@staticmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
tool_text = ""
tool_names = []
for tool in tools:
tool = tool.get("function", "") if tool.get("type") == "function" else tool
param_text = ""
for name, param in tool["parameters"]["properties"].items():
required, enum, items = "", "", ""
if name in tool["parameters"].get("required", []):
required = ", required"
if param.get("enum", None):
enum = ", should be one of [{}]".format(", ".join(param["enum"]))
if param.get("items", None):
items = ", where each item should be {}".format(param["items"].get("type", ""))
param_text += " - {name} ({type}{required}): {desc}{enum}{items}\n".format(
name=name,
type=param.get("type", ""),
required=required,
desc=param.get("description", ""),
enum=enum,
items=items,
)
tool_text += "> Tool Name: {name}\nTool Description: {desc}\nTool Args:\n{args}\n".format(
name=tool["name"], desc=tool.get("description", ""), args=param_text
)
tool_names.append(tool["name"])
return DEFAULT_TOOL_PROMPT.format(tool_text=tool_text, tool_names=", ".join(tool_names))
@override
@staticmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
return "\n".join([f"Action: {name}\nAction Input: {arguments}" for name, arguments in functions])
@override
@staticmethod
def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
regex = re.compile(r"Action:\s*([a-zA-Z0-9_]+)\s*Action Input:\s*(.+?)(?=\s*Action:|\s*$)", re.DOTALL)
action_match: list[tuple[str, str]] = re.findall(regex, content)
if not action_match:
return content
results = []
for match in action_match:
tool_name = match[0].strip()
tool_input = match[1].strip().strip('"').strip("```")
try:
arguments = json.loads(tool_input)
results.append(FunctionCall(tool_name, json.dumps(arguments, ensure_ascii=False)))
except json.JSONDecodeError:
return content
return results
class GLM4ToolUtils(ToolUtils):
@override
@staticmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
tool_text = ""
for tool in tools:
tool = tool.get("function", "") if tool.get("type") == "function" else tool
tool_text += "\n\n## {name}\n\n{body}\n在调用上述函数时,请使用 Json 格式表示调用的参数。".format(
name=tool["name"], body=json.dumps(tool, indent=4, ensure_ascii=False)
)
return GLM4_TOOL_PROMPT.format(tool_text=tool_text)
@override
@staticmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
if len(functions) > 1:
raise ValueError("GLM-4 does not support parallel functions.")
return f"{functions[0].name}\n{functions[0].arguments}"
@override
@staticmethod
def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
if "\n" not in content:
return content
tool_name, tool_input = content.split("\n", maxsplit=1)
try:
arguments = json.loads(tool_input.strip())
except json.JSONDecodeError:
return content
return [FunctionCall(tool_name, json.dumps(arguments, ensure_ascii=False))]
class Llama3ToolUtils(ToolUtils):
@override
@staticmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
date = datetime.now().strftime("%d %b %Y")
tool_text = ""
for tool in tools:
wrapped_tool = tool if tool.get("type") == "function" else {"type": "function", "function": tool}
tool_text += json.dumps(wrapped_tool, indent=4, ensure_ascii=False) + "\n\n"
return LLAMA3_TOOL_PROMPT.format(date=date, tool_text=tool_text)
@override
@staticmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
function_objects = [{"name": name, "parameters": json.loads(arguments)} for name, arguments in functions]
return json.dumps(function_objects[0] if len(function_objects) == 1 else function_objects, ensure_ascii=False)
@override
@staticmethod
def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
try:
tools = json.loads(content.strip())
except json.JSONDecodeError:
return content
tools = [tools] if not isinstance(tools, list) else tools
try:
return [FunctionCall(tool["name"], json.dumps(tool["parameters"], ensure_ascii=False)) for tool in tools]
except KeyError:
return content
class MiniMaxM1ToolUtils(ToolUtils):
@override
@staticmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
tool_text = ""
for tool in tools:
tool = tool.get("function", "") if tool.get("type") == "function" else tool
tool_text += json.dumps(tool, ensure_ascii=False) + "\n"
return MINIMAX_M1_TOOL_PROMPT.format(tool_text=tool_text)
@override
@staticmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
function_texts = []
for func in functions:
name, arguments = func.name, json.loads(func.arguments)
function_texts.append(json.dumps({"name": name, "arguments": arguments}, ensure_ascii=False))
return "<tool_calls>\n" + "\n".join(function_texts) + "\n</tool_calls>"
@override
@staticmethod
def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
regex = re.compile(r"<tool_calls>\s*(.+?)\s*</tool_calls>", re.DOTALL)
tool_match = re.search(regex, content)
if not tool_match:
return content
tool_calls_content = tool_match.group(1)
results = []
for line in tool_calls_content.split("\n"):
line = line.strip()
if not line:
continue
try:
tool_call = json.loads(line)
results.append(FunctionCall(tool_call["name"], json.dumps(tool_call["arguments"], ensure_ascii=False)))
except json.JSONDecodeError:
continue
return results
class MiniMaxM2ToolUtils(ToolUtils):
@override
@staticmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
tool_text = ""
for tool in tools:
tool = tool.get("function", "") if tool.get("type") == "function" else tool
tool_text += "<tool>" + json.dumps(tool, ensure_ascii=False) + "</tool>\n"
return MINIMAX_M2_TOOL_PROMPT.format(tool_text=tool_text)
@override
@staticmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
function_texts = []
for func in functions:
name, arguments = func.name, json.loads(func.arguments)
prompt = f'<invoke name="{name}">'
for key, value in arguments.items():
prompt += f'\n<parameter name="{key}">'
if not isinstance(value, str):
value = json.dumps(value, ensure_ascii=False)
prompt += value + "</parameter>"
prompt += "\n</invoke>"
function_texts.append(prompt)
@override
@staticmethod
def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
regex = re.compile(r"<minimax:tool_call>\s*(.+?)\s*</minimax:tool_call>", re.DOTALL)
tool_match = re.search(regex, content)
if not tool_match:
return content
tool_calls_content = tool_match.group(1)
invoke_regex = re.compile(r"<invoke name=\"(.*?)\">(.*?)</invoke>", re.DOTALL)
results = []
for func_name, params_block in re.findall(invoke_regex, tool_calls_content):
args_dict = {}
param_pattern = re.compile(r"<parameter name=\"(.*?)\">(.*?)</parameter>", re.DOTALL)
for key, raw_value in re.findall(param_pattern, params_block):
value = raw_value.strip()
try:
parsed_value = json.loads(value)
except json.JSONDecodeError:
parsed_value = raw_value
args_dict[key] = parsed_value
results.append(FunctionCall(func_name.strip(), json.dumps(args_dict, ensure_ascii=False)))
return results
class MistralToolUtils(ToolUtils):
@override
@staticmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
wrapped_tools = []
for tool in tools:
wrapped_tools.append(tool if tool.get("type") == "function" else {"type": "function", "function": tool})
return "[AVAILABLE_TOOLS] " + json.dumps(wrapped_tools, ensure_ascii=False) + "[/AVAILABLE_TOOLS]"
@override
@staticmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
return json.dumps(
[{"name": name, "arguments": json.loads(arguments)} for name, arguments in functions], ensure_ascii=False
)
@override
@staticmethod
def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
try:
tools = json.loads(content.strip())
except json.JSONDecodeError:
return content
tools = [tools] if not isinstance(tools, list) else tools
try:
return [FunctionCall(tool["name"], json.dumps(tool["arguments"], ensure_ascii=False)) for tool in tools]
except KeyError:
return content
class QwenToolUtils(ToolUtils):
@override
@staticmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
tool_text = ""
for tool in tools:
wrapped_tool = tool if tool.get("type") == "function" else {"type": "function", "function": tool}
tool_text += "\n" + json.dumps(wrapped_tool, ensure_ascii=False)
return QWEN_TOOL_PROMPT.format(tool_text=tool_text)
@override
@staticmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
function_texts = [
json.dumps({"name": name, "arguments": json.loads(arguments)}, ensure_ascii=False)
for name, arguments in functions
]
return "\n".join([f"<tool_call>\n{text}\n</tool_call>" for text in function_texts])
@override
@staticmethod
def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
regex = re.compile(r"<tool_call>(.+?)</tool_call>(?=\s*<tool_call>|\s*$)", re.DOTALL)
tool_match: list[str] = re.findall(regex, content)
if not tool_match:
return content
results = []
for tool in tool_match:
try:
tool = json.loads(tool.strip())
except json.JSONDecodeError:
return content
if "name" not in tool or "arguments" not in tool:
return content
results.append(FunctionCall(tool["name"], json.dumps(tool["arguments"], ensure_ascii=False)))
return results
class Qwen35ToolUtils(ToolUtils):
@override
@staticmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
tool_text = ""
for tool in tools:
tool = tool.get("function", tool) if tool.get("type") == "function" else tool
tool_text += "\n" + json.dumps(tool, ensure_ascii=False)
return QWEN35_TOOL_PROMPT.format(tool_text=tool_text)
@override
@staticmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
function_texts = []
for func in functions:
name, arguments = func.name, json.loads(func.arguments)
prompt = f"<tool_call>\n<function={name}>"
for key, value in arguments.items():
prompt += f"\n<parameter={key}>"
if not isinstance(value, str):
value = json.dumps(value, ensure_ascii=False)
prompt += f"\n{value}\n</parameter>"
prompt += "\n</function>\n</tool_call>"
function_texts.append(prompt)
return "\n".join(function_texts)
@override
@staticmethod
def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
results = []
regex = re.compile(r"<tool_call>\s*<function=\s*([^\s<>]+)\s*(.*?)\s*</function>\s*</tool_call>", re.DOTALL)
for func_name, params_block in re.findall(regex, content):
args_dict = {}
param_pattern = re.compile(r"<parameter=(.*?)>(.*?)</parameter>", re.DOTALL)
for key, raw_value in re.findall(param_pattern, params_block.strip()):
value = raw_value.strip()
try:
parsed_value = json.loads(value)
except json.JSONDecodeError:
parsed_value = raw_value.strip()
args_dict[key] = parsed_value
results.append(FunctionCall(func_name.strip(), json.dumps(args_dict, ensure_ascii=False)))
return results if results else content
class GLM4MOEToolUtils(QwenToolUtils):
@override
@staticmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
tool_text = ""
for tool in tools:
wrapped_tool = tool if tool.get("type") == "function" else {"type": "function", "function": tool}
tool_text += "\n" + json.dumps(wrapped_tool, ensure_ascii=False)
return GLM4_MOE_TOOL_PROMPT.format(tool_text=tool_text)
@override
@staticmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
function_json = [
{"func_name": name, "func_key_values": json.loads(arguments)} for name, arguments in functions
]
function_texts = []
for func in function_json:
prompt = "\n<tool_call>" + func["func_name"]
for key, value in func["func_key_values"].items():
prompt += "\n<arg_key>" + key + "</arg_key>"
if not isinstance(value, str):
value = json.dumps(value, ensure_ascii=False)
prompt += "\n<arg_value>" + value + "</arg_value>"
function_texts.append(prompt)
return "\n".join(function_texts)
class SeedToolUtils(ToolUtils):
@override
@staticmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
return SEED_TOOL_PROMPT.format(tool_text="\n" + json.dumps(tools, ensure_ascii=False))
@override
@staticmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
function_json = [
{"func_name": name, "func_key_values": json.loads(arguments)} for name, arguments in functions
]
function_texts = []
for func in function_json:
prompt = "\n<seed:tool_call>\n<function=" + func["func_name"]
for key, value in func["func_key_values"].items():
prompt += "\n<parameter=" + key + ">"
if not isinstance(value, str):
value = json.dumps(value, ensure_ascii=False)
prompt += value + "</parameter>"
prompt += "\n</function>\n</seed:tool_call>"
function_texts.append(prompt)
return "\n".join(function_texts)
@override
@staticmethod
def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
results = []
regex = re.compile(
r"<seed:tool_call>\s*<function=\s*([^\s<]+)\s*(.*?)\s*</function>\s*</seed:tool_call>", re.DOTALL
)
for func_name, params_block in re.findall(regex, content):
args_dict = {}
param_pattern = re.compile(r"<parameter=(.*?)>(.*?)</parameter>", re.DOTALL)
for key, raw_value in re.findall(param_pattern, params_block.strip()):
value = raw_value.strip()
try:
parsed_value = json.loads(value)
except json.JSONDecodeError:
parsed_value = raw_value
args_dict[key] = parsed_value
results.append(FunctionCall(func_name.strip(), json.dumps(args_dict, ensure_ascii=False)))
return results
class LingToolUtils(QwenToolUtils):
@override
@staticmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
tool_text = ""
for tool in tools:
wrapped_tool = tool if tool.get("type") == "function" else {"type": "function", "function": tool}
tool_text += "\n" + json.dumps(wrapped_tool, ensure_ascii=False)
return LING_TOOL_PROMPT.format(tool_text=tool_text) + "\n" + "detailed thinking off"
class LFM2ToolUtils(ToolUtils):
@override
@staticmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
tool_list = []
for tool in tools:
tool = tool.get("function", tool) if tool.get("type") == "function" else tool
tool_list.append(tool)
return LFM2_TOOL_PROMPT.format(tool_text=json.dumps(tool_list, ensure_ascii=False))
@override
@staticmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
calls = []
for name, args_json in functions:
args = json.loads(args_json)
kwargs_parts = []
for key, value in args.items():
if isinstance(value, str):
kwargs_parts.append(f'{key}="{value}"')
else:
kwargs_parts.append(f"{key}={json.dumps(value, ensure_ascii=False)}")
calls.append(f"{name}({', '.join(kwargs_parts)})")
return f"<|tool_call_start|>[{', '.join(calls)}]<|tool_call_end|>"
@staticmethod
def _ast_to_value(node: ast.AST) -> Any:
# Handle JSON-style true/false/null as Name nodes
if isinstance(node, ast.Name):
if node.id == "true":
return True
elif node.id == "false":
return False
elif node.id == "null":
return None
else:
raise ValueError(f"Unknown identifier: {node.id}")
# Use literal_eval for other cases (strings, numbers, lists, dicts)
return ast.literal_eval(node)
@override
@staticmethod
def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
# Extract content between tool call markers
start_marker = "<|tool_call_start|>"
end_marker = "<|tool_call_end|>"
start_idx = content.find(start_marker)
if start_idx == -1:
return content
end_idx = content.find(end_marker, start_idx)
if end_idx == -1:
return content
tool_call_str = content[start_idx + len(start_marker) : end_idx].strip()
# Parse Pythonic function call syntax using AST
try:
tree = ast.parse(tool_call_str, mode="eval")
except SyntaxError:
return content
# Handle both single call and list of calls
if isinstance(tree.body, ast.List):
call_nodes = tree.body.elts
elif isinstance(tree.body, ast.Call):
call_nodes = [tree.body]
else:
return content
results = []
for node in call_nodes:
if not isinstance(node, ast.Call):
return content
# Extract function name
if isinstance(node.func, ast.Name):
func_name = node.func.id
else:
return content
# Extract keyword arguments
args_dict = {}
for keyword in node.keywords:
key = keyword.arg
try:
value = LFM2ToolUtils._ast_to_value(keyword.value)
except (ValueError, SyntaxError):
return content
args_dict[key] = value
results.append(FunctionCall(func_name, json.dumps(args_dict, ensure_ascii=False)))
return results if results else content
TOOLS = {
"default": DefaultToolUtils(),
"glm4": GLM4ToolUtils(),
"llama3": Llama3ToolUtils(),
"lfm2": LFM2ToolUtils(),
"minimax1": MiniMaxM1ToolUtils(),
"minimax2": MiniMaxM2ToolUtils(),
"mistral": MistralToolUtils(),
"qwen": QwenToolUtils(),
"qwen3_5": Qwen35ToolUtils(),
"glm4_moe": GLM4MOEToolUtils(),
"seed_oss": SeedToolUtils(),
"ling": LingToolUtils(),
}
def get_tool_utils(name: str) -> "ToolUtils":
tool_utils = TOOLS.get(name, None)
if tool_utils is None:
raise ValueError(f"Tool utils `{name}` not found.")
return tool_utils | --- +++ @@ -122,24 +122,32 @@
@dataclass
class ToolUtils(ABC):
+ """Base class for tool utilities."""
@staticmethod
@abstractmethod
def tool_formatter(tools: list[dict[str, Any]]) -> str:
+ r"""Generate the system message describing all the available tools."""
...
@staticmethod
@abstractmethod
def function_formatter(functions: list["FunctionCall"]) -> str:
+ r"""Generate the assistant message including all the tool calls."""
...
@staticmethod
@abstractmethod
def tool_extractor(content: str) -> Union[str, list["FunctionCall"]]:
+ r"""Extract all the function calls from the assistant message.
+
+ It should be an inverse function of `function_formatter`.
+ """
...
class DefaultToolUtils(ToolUtils):
+ r"""Default tool using template."""
@override
@staticmethod
@@ -203,6 +211,7 @@
class GLM4ToolUtils(ToolUtils):
+ r"""GLM-4 tool using template."""
@override
@staticmethod
@@ -240,6 +249,10 @@
class Llama3ToolUtils(ToolUtils):
+ r"""Llama 3.x tool using template with `tools_in_user_message=False`.
+
+ Reference: https://www.llama.com/docs/model-cards-and-prompt-formats/llama3_1/#json-based-tool-calling
+ """
@override
@staticmethod
@@ -274,6 +287,7 @@
class MiniMaxM1ToolUtils(ToolUtils):
+ r"""MiniMax-M1 tool using template."""
@override
@staticmethod
@@ -320,6 +334,7 @@
class MiniMaxM2ToolUtils(ToolUtils):
+ r"""MiniMax-M2 tool using template."""
@override
@staticmethod
@@ -375,6 +390,7 @@
class MistralToolUtils(ToolUtils):
+ r"""Mistral v0.3 tool using template."""
@override
@staticmethod
@@ -408,6 +424,7 @@
class QwenToolUtils(ToolUtils):
+ r"""Qwen 2.5 tool using template."""
@override
@staticmethod
@@ -452,6 +469,7 @@
class Qwen35ToolUtils(ToolUtils):
+ r"""Qwen 3.5 tool using template."""
@override
@staticmethod
@@ -502,6 +520,7 @@
class GLM4MOEToolUtils(QwenToolUtils):
+ r"""GLM-4-MOE tool using template."""
@override
@staticmethod
@@ -533,6 +552,7 @@
class SeedToolUtils(ToolUtils):
+ r"""Seed tool using template."""
@override
@staticmethod
@@ -582,6 +602,7 @@
class LingToolUtils(QwenToolUtils):
+ r"""Ling v2 tool using template."""
@override
@staticmethod
@@ -595,6 +616,7 @@
class LFM2ToolUtils(ToolUtils):
+ r"""LFM2.5 tool using template with Pythonic function call syntax."""
@override
@staticmethod
@@ -625,6 +647,7 @@
@staticmethod
def _ast_to_value(node: ast.AST) -> Any:
+ """Convert an AST node to a Python value, handling JSON-style booleans/null."""
# Handle JSON-style true/false/null as Name nodes
if isinstance(node, ast.Name):
if node.id == "true":
@@ -717,4 +740,4 @@ if tool_utils is None:
raise ValueError(f"Tool utils `{name}` not found.")
- return tool_utils+ return tool_utils
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/data/tool_utils.py |
Add docstrings for production code | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import TYPE_CHECKING, Literal, Optional, Union
import numpy as np
from datasets import Dataset, DatasetDict, load_dataset, load_from_disk
from ..extras import logging
from ..extras.constants import FILEEXT2TYPE
from ..extras.misc import check_version, has_tokenized_data
from .converter import align_dataset
from .data_utils import get_dataset_module, merge_dataset, read_cloud_json, split_dataset
from .parser import get_dataset_list
from .processor import (
FeedbackDatasetProcessor,
PackedSupervisedDatasetProcessor,
PairwiseDatasetProcessor,
PretrainDatasetProcessor,
SupervisedDatasetProcessor,
UnsupervisedDatasetProcessor,
)
if TYPE_CHECKING:
from datasets import Dataset, IterableDataset
from transformers import PreTrainedTokenizer, ProcessorMixin, Seq2SeqTrainingArguments
from ..hparams import DataArguments, ModelArguments
from .data_utils import DatasetModule
from .parser import DatasetAttr
from .processor import DatasetProcessor
from .template import Template
logger = logging.get_logger(__name__)
def _load_single_dataset(
dataset_attr: "DatasetAttr",
model_args: "ModelArguments",
data_args: "DataArguments",
training_args: "Seq2SeqTrainingArguments",
) -> Union["Dataset", "IterableDataset"]:
logger.info_rank0(f"Loading dataset {dataset_attr}...")
data_path, data_name, data_dir, data_files = None, None, None, None
if dataset_attr.load_from in ["hf_hub", "ms_hub", "om_hub"]:
data_path = dataset_attr.dataset_name
data_name = dataset_attr.subset
data_dir = dataset_attr.folder
elif dataset_attr.load_from == "script":
data_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)
data_name = dataset_attr.subset
data_dir = dataset_attr.folder
elif dataset_attr.load_from == "cloud_file":
data_path = dataset_attr.dataset_name
elif dataset_attr.load_from == "file":
data_files = []
local_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)
if os.path.isdir(local_path): # is directory
for file_name in os.listdir(local_path):
data_files.append(os.path.join(local_path, file_name))
elif os.path.isfile(local_path): # is file
data_files.append(local_path)
else:
raise ValueError(f"File {local_path} not found.")
data_path = FILEEXT2TYPE.get(os.path.splitext(data_files[0])[-1][1:], None)
if data_path is None:
raise ValueError("Allowed file types: {}.".format(",".join(FILEEXT2TYPE.keys())))
if any(data_path != FILEEXT2TYPE.get(os.path.splitext(data_file)[-1][1:], None) for data_file in data_files):
raise ValueError("File types should be identical.")
else:
raise NotImplementedError(f"Unknown load type: {dataset_attr.load_from}.")
if dataset_attr.load_from == "ms_hub":
check_version("modelscope>=1.14.0", mandatory=True)
from modelscope import MsDataset # type: ignore
from modelscope.utils.config_ds import MS_DATASETS_CACHE # type: ignore
cache_dir = model_args.cache_dir or MS_DATASETS_CACHE
dataset = MsDataset.load(
dataset_name=data_path,
subset_name=data_name,
data_dir=data_dir,
data_files=data_files,
split=dataset_attr.split,
cache_dir=cache_dir,
token=model_args.ms_hub_token,
use_streaming=data_args.streaming,
)
if isinstance(dataset, MsDataset):
dataset = dataset.to_hf_dataset()
elif dataset_attr.load_from == "om_hub":
check_version("openmind>=0.8.0", mandatory=True)
from openmind import OmDataset # type: ignore
from openmind.utils.hub import OM_DATASETS_CACHE # type: ignore
cache_dir = model_args.cache_dir or OM_DATASETS_CACHE
dataset = OmDataset.load_dataset(
path=data_path,
name=data_name,
data_dir=data_dir,
data_files=data_files,
split=dataset_attr.split,
cache_dir=cache_dir,
token=model_args.om_hub_token,
streaming=data_args.streaming,
)
elif dataset_attr.load_from == "cloud_file":
dataset = Dataset.from_list(read_cloud_json(data_path), split=dataset_attr.split)
else:
dataset = load_dataset(
path=data_path,
name=data_name,
data_dir=data_dir,
data_files=data_files,
split=dataset_attr.split,
cache_dir=model_args.cache_dir,
token=model_args.hf_hub_token,
num_proc=data_args.preprocessing_num_workers,
streaming=data_args.streaming and dataset_attr.load_from != "file",
)
if data_args.streaming and dataset_attr.load_from == "file":
dataset = dataset.to_iterable_dataset(num_shards=training_args.dataloader_num_workers)
if dataset_attr.num_samples is not None and not data_args.streaming:
target_num = dataset_attr.num_samples
indexes = np.random.permutation(len(dataset))[:target_num] # all samples should be included
target_num -= len(indexes)
if target_num > 0:
expand_indexes = np.random.choice(len(dataset), target_num)
indexes = np.concatenate((indexes, expand_indexes), axis=0)
assert len(indexes) == dataset_attr.num_samples, "Sample num mismatched."
dataset = dataset.select(indexes)
logger.info_rank0(f"Sampled {dataset_attr.num_samples} examples from dataset {dataset_attr}.")
if data_args.max_samples is not None: # truncate dataset
max_samples = min(data_args.max_samples, len(dataset))
dataset = dataset.select(range(max_samples))
return align_dataset(dataset, dataset_attr, data_args, training_args)
def _get_merged_dataset(
dataset_names: list[str] | None,
model_args: "ModelArguments",
data_args: "DataArguments",
training_args: "Seq2SeqTrainingArguments",
stage: Literal["pt", "sft", "rm", "ppo", "kto"],
return_dict: bool = False,
) -> Union["Dataset", "IterableDataset", dict[str, "Dataset"]] | None:
if dataset_names is None:
return None
datasets = {}
for dataset_name, dataset_attr in zip(dataset_names, get_dataset_list(dataset_names, data_args.dataset_dir)):
if (stage == "rm" and dataset_attr.ranking is False) or (stage != "rm" and dataset_attr.ranking is True):
raise ValueError("The dataset is not applicable in the current training stage.")
datasets[dataset_name] = _load_single_dataset(dataset_attr, model_args, data_args, training_args)
if return_dict:
return datasets
else:
return merge_dataset(list(datasets.values()), data_args, seed=training_args.seed)
def _get_dataset_processor(
data_args: "DataArguments",
stage: Literal["pt", "sft", "rm", "ppo", "kto"],
template: "Template",
tokenizer: "PreTrainedTokenizer",
processor: Optional["ProcessorMixin"],
do_generate: bool = False,
) -> "DatasetProcessor":
if stage == "pt":
dataset_processor_class = PretrainDatasetProcessor
elif stage == "sft" and not do_generate:
if data_args.packing:
if data_args.neat_packing: # hack datasets to have int32 attention mask
from datasets.arrow_writer import OptimizedTypedSequence, TypedSequence
def __init__(self, data, **kwargs):
return TypedSequence.__init__(
self,
data,
type=kwargs.pop("type", None),
try_type=kwargs.pop("try_type", None),
optimized_int_type=kwargs.pop("optimized_int_type", None),
)
OptimizedTypedSequence.__init__ = __init__
dataset_processor_class = PackedSupervisedDatasetProcessor
else:
dataset_processor_class = SupervisedDatasetProcessor
elif stage == "rm":
dataset_processor_class = PairwiseDatasetProcessor
elif stage == "kto":
dataset_processor_class = FeedbackDatasetProcessor
else:
dataset_processor_class = UnsupervisedDatasetProcessor
return dataset_processor_class(template=template, tokenizer=tokenizer, processor=processor, data_args=data_args)
def _get_preprocessed_dataset(
    dataset: Union["Dataset", "IterableDataset"] | None,
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
    stage: Literal["pt", "sft", "rm", "ppo", "kto"],
    template: "Template",
    tokenizer: "PreTrainedTokenizer",
    processor: Optional["ProcessorMixin"] = None,
    is_eval: bool = False,
) -> Union["Dataset", "IterableDataset"] | None:
    r"""Tokenize ``dataset`` with the stage-specific processor.

    Returns ``None`` when ``dataset`` is ``None``. All original columns are removed
    by the ``map`` call; only the processor's outputs remain. Raises ``RuntimeError``
    when preprocessing filters out every sample.
    """
    if dataset is None:
        return None

    dataset_processor = _get_dataset_processor(
        data_args, stage, template, tokenizer, processor, do_generate=(training_args.predict_with_generate and is_eval)
    )
    # Peek at the first sample to collect the column names that must be dropped after mapping.
    column_names = list(next(iter(dataset)).keys())
    kwargs = {}
    if not data_args.streaming:  # streaming datasets accept neither num_proc nor caching args
        kwargs = dict(
            num_proc=data_args.preprocessing_num_workers,
            # Non-main local ranks always load from cache so only rank 0 re-tokenizes.
            load_from_cache_file=(not data_args.overwrite_cache) or (training_args.local_process_index != 0),
            desc="Running tokenizer on dataset",
        )

    dataset = dataset.map(
        dataset_processor.preprocess_dataset,
        batched=True,
        batch_size=data_args.preprocessing_batch_size,
        remove_columns=column_names,
        **kwargs,
    )

    if training_args.should_log:
        try:
            print("eval example:" if is_eval else "training example:")
            dataset_processor.print_data_example(next(iter(dataset)))
        except StopIteration:  # the processed dataset is empty — every sample was dropped
            if stage == "pt":
                raise RuntimeError("Cannot find sufficient samples, consider increasing dataset size.")
            else:
                raise RuntimeError("Cannot find valid samples, check `data/README.md` for the data format.")

    return dataset
def get_dataset(
    template: "Template",
    model_args: "ModelArguments",
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
    stage: Literal["pt", "sft", "rm", "ppo", "kto"],
    tokenizer: "PreTrainedTokenizer",
    processor: Optional["ProcessorMixin"] = None,
) -> "DatasetModule":
    r"""Build the tokenized train (and optional eval) datasets for the given stage.

    When ``data_args.tokenized_path`` points at an existing tokenized dataset it is
    loaded as-is and every other data argument is ignored. Otherwise the raw
    datasets are loaded, merged, split, tokenized, and — when ``tokenized_path``
    is set — saved back to disk for reuse.
    """
    # Load tokenized dataset if path exists
    if data_args.tokenized_path is not None:
        if has_tokenized_data(data_args.tokenized_path):
            logger.warning_rank0("Loading dataset from disk will ignore other data arguments.")
            tokenized_data = load_from_disk(data_args.tokenized_path)
            dataset_module = get_dataset_module(tokenized_data)
            if data_args.streaming:
                dataset_module["train_dataset"] = dataset_module["train_dataset"].to_iterable_dataset()

            logger.info_rank0(f"Loaded tokenized dataset from {data_args.tokenized_path}.")
            return dataset_module

        if data_args.streaming:
            # A streaming dataset cannot be materialized by save_to_disk below.
            raise ValueError("Turn off `streaming` when saving dataset to disk.")

    # Load and preprocess dataset.
    # main_process_first: the (local) main rank runs the body first so downloads and
    # caches are built once, then the remaining ranks reuse them.
    with training_args.main_process_first(desc="load dataset", local=(not data_args.data_shared_file_system)):
        dataset = _get_merged_dataset(data_args.dataset, model_args, data_args, training_args, stage)
        eval_dataset = _get_merged_dataset(
            data_args.eval_dataset,
            model_args,
            data_args,
            training_args,
            stage,
            return_dict=data_args.eval_on_each_dataset,
        )

    with training_args.main_process_first(desc="pre-process dataset", local=(not data_args.data_shared_file_system)):
        # Split first so the eval portion (provided or carved from train) is preprocessed with is_eval=True.
        train_dict, eval_dict = split_dataset(dataset, eval_dataset, data_args, seed=training_args.seed)
        if "train" in train_dict:
            train_dict["train"] = _get_preprocessed_dataset(
                train_dict["train"], data_args, training_args, stage, template, tokenizer, processor, is_eval=False
            )

        for key in eval_dict:
            eval_dict[key] = _get_preprocessed_dataset(
                eval_dict[key], data_args, training_args, stage, template, tokenizer, processor, is_eval=True
            )

        # Combine train and eval dictionaries
        dataset_dict = DatasetDict({**train_dict, **eval_dict})
        if data_args.tokenized_path is not None:  # save tokenized dataset to disk
            if training_args.should_save:
                dataset_dict.save_to_disk(data_args.tokenized_path)
                logger.info_rank0(f"Tokenized dataset is saved at {data_args.tokenized_path}.")
                logger.info_rank0(f"Please launch the training with `tokenized_path: {data_args.tokenized_path}`.")

    return get_dataset_module(dataset_dict)
training_args: "Seq2SeqTrainingArguments",
) -> Union["Dataset", "IterableDataset"]:
+ r"""Load a single dataset and aligns it to the standard format."""
logger.info_rank0(f"Loading dataset {dataset_attr}...")
data_path, data_name, data_dir, data_files = None, None, None, None
if dataset_attr.load_from in ["hf_hub", "ms_hub", "om_hub"]:
@@ -168,6 +169,7 @@ stage: Literal["pt", "sft", "rm", "ppo", "kto"],
return_dict: bool = False,
) -> Union["Dataset", "IterableDataset", dict[str, "Dataset"]] | None:
+ r"""Return the merged datasets in the standard format."""
if dataset_names is None:
return None
@@ -192,6 +194,7 @@ processor: Optional["ProcessorMixin"],
do_generate: bool = False,
) -> "DatasetProcessor":
+ r"""Return the corresponding dataset processor."""
if stage == "pt":
dataset_processor_class = PretrainDatasetProcessor
elif stage == "sft" and not do_generate:
@@ -233,6 +236,7 @@ processor: Optional["ProcessorMixin"] = None,
is_eval: bool = False,
) -> Union["Dataset", "IterableDataset"] | None:
+ r"""Preprocesses the dataset, including format checking and tokenization."""
if dataset is None:
return None
@@ -278,6 +282,7 @@ tokenizer: "PreTrainedTokenizer",
processor: Optional["ProcessorMixin"] = None,
) -> "DatasetModule":
+ r"""Get the train dataset and optionally gets the evaluation dataset."""
# Load tokenized dataset if path exists
if data_args.tokenized_path is not None:
if has_tokenized_data(data_args.tokenized_path):
@@ -328,4 +333,4 @@ logger.info_rank0(f"Tokenized dataset is saved at {data_args.tokenized_path}.")
logger.info_rank0(f"Please launch the training with `tokenized_path: {data_args.tokenized_path}`.")
- return get_dataset_module(dataset_dict)+ return get_dataset_module(dataset_dict)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/data/loader.py |
Add docstrings to improve readability | # Copyright 2025 the KVCache.AI team, Approaching AI, and the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib.util as _u
from typing import TYPE_CHECKING, Any
import torch
from ...extras import logging
from ...extras.misc import get_current_device
if TYPE_CHECKING:
from ...hparams import FinetuningArguments, ModelArguments
from transformers import AutoConfig, AutoModelForCausalLM, PretrainedConfig, PreTrainedModel
KT_AVAILABLE = _u.find_spec("ktransformers") is not None
if KT_AVAILABLE:
from ktransformers.models.modeling_deepseek import DeepseekV2ForCausalLM
from ktransformers.models.modeling_deepseek_v3 import DeepseekV3ForCausalLM
from ktransformers.models.modeling_llama import LlamaForCausalLM
from ktransformers.models.modeling_mixtral import MixtralForCausalLM
from ktransformers.models.modeling_qwen2_moe import Qwen2MoeForCausalLM
from ktransformers.models.modeling_qwen3_moe import Qwen3MoeForCausalLM
from ktransformers.optimize.optimize import optimize_and_load_gguf
from ktransformers.server.config.config import Config
from ktransformers.sft.lora import inject_lora_layer
from ktransformers.util.custom_loader import GGUFLoader, SafeTensorLoader
from ktransformers.util.globals import GLOBAL_CONFIG
from ktransformers.util.utils import load_weights
logger = logging.get_logger(__name__)
def _get_kt_kwargs(
    config: "PretrainedConfig",
    model_name_or_path: str,
    model_args: "ModelArguments",
    finetuning_args: "FinetuningArguments",
) -> dict[str, Any]:
    r"""Assemble the keyword arguments used to set up a model with KTransformers."""
    return dict(
        model_name=model_name_or_path,
        max_seq_length=model_args.model_max_length or 4096,
        dtype=model_args.compute_dtype,
        load_in_4bit=model_args.quantization_bit == 4,
        token=model_args.hf_hub_token,
        full_finetuning=finetuning_args.finetuning_type == "full",
        device_map={"": get_current_device()},
        rope_scaling=getattr(config, "rope_scaling", None),
        fix_tokenizer=False,
        trust_remote_code=model_args.trust_remote_code,
        use_gradient_checkpointing="ktransformers",
    )
def load_kt_pretrained_model(config: "PretrainedConfig", model_args: "ModelArguments") -> "PreTrainedModel":
    r"""Build a model skeleton on the meta device and fill it from GGUF weights via KTransformers.

    NOTE(review): the ``config`` parameter is immediately overwritten by a freshly
    loaded ``AutoConfig`` — only ``model_args`` actually drives this function.
    """
    # Architectures that have KTransformers-specific modeling implementations.
    custom_models = {
        "DeepseekV2ForCausalLM": DeepseekV2ForCausalLM,
        "DeepseekV3ForCausalLM": DeepseekV3ForCausalLM,
        "Qwen2MoeForCausalLM": Qwen2MoeForCausalLM,
        "Qwen3MoeForCausalLM": Qwen3MoeForCausalLM,
        "LlamaForCausalLM": LlamaForCausalLM,
        "MixtralForCausalLM": MixtralForCausalLM,
    }
    # These settings are stored on KTransformers' Config object (appears to be
    # process-global — confirm) and read later by the optimized kernels.
    Config().cpu_infer = model_args.cpu_infer
    Config().chunk_size = model_args.chunk_size
    config = AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code)
    if model_args.mode == "long_context":
        assert config.architectures[0] == "LlamaForCausalLM", "only LlamaForCausalLM support long_context mode"
        torch.set_default_dtype(torch.float16)
    else:
        # Side effect: changes the process-wide default dtype for new tensors.
        torch.set_default_dtype(config.torch_dtype)

    # Instantiate on the meta device: no real weight memory is allocated until
    # optimize_and_load_gguf materializes the parameters below.
    with torch.device("meta"):
        if config.architectures[0] in custom_models:
            print("using custom modeling_xxx.py.")
            if "Qwen2Moe" in config.architectures[0]:  # Qwen2Moe must use flash_attention_2 to avoid overflow.
                config._attn_implementation = "flash_attention_2"

            if "Llama" in config.architectures[0]:
                config._attn_implementation = "eager"

            if "Mixtral" in config.architectures[0]:
                config._attn_implementation = "flash_attention_2"

            model = custom_models[config.architectures[0]](config)
        else:
            attn_implementation = "flash_attention_2"
            model = AutoModelForCausalLM.from_config(
                config, trust_remote_code=True, attn_implementation=attn_implementation
            )

    optimize_config_path = model_args.kt_optimize_rule
    gguf_path = model_args.model_name_or_path
    assert optimize_config_path is not None, "optimize_config_path must be provided (path to YAML rules file)."
    assert gguf_path is not None, "gguf_path must be provided (path to a folder or .gguf file)."
    GLOBAL_CONFIG._config["mod"] = "infer"
    # Apply the YAML optimization rules and load the GGUF weights in place.
    optimize_and_load_gguf(model, optimize_config_path, gguf_path, config)
    return model
def get_kt_peft_model(model: "PreTrainedModel", peft_kwargs: dict[str, Any]) -> "PreTrainedModel":
    r"""Wrap ``model`` with KTransformers' PEFT adapter machinery."""
    # Imported lazily so this module can be imported without ktransformers installed.
    from ktransformers.sft.peft_utils.mapping import get_peft_model

    peft_model = get_peft_model(model, peft_kwargs)
    return peft_model
def load_kt_peft_model(model_args: "ModelArguments", model: "PreTrainedModel") -> "PreTrainedModel":
    r"""Inject LoRA layers and load adapter weights (GGUF or safetensors) with KTransformers.

    Only the first entry of ``model_args.adapter_name_or_path`` is used. GGUF
    adapters are loaded wholesale; safetensors adapters are copied tensor by
    tensor with PEFT-to-KTransformers key remapping.
    """
    load_adapter_name_or_path = model_args.adapter_name_or_path[0]
    if load_adapter_name_or_path.endswith(".gguf"):
        inject_lora_layer(model, load_adapter_name_or_path)
        adapter_gguf_loader = GGUFLoader(load_adapter_name_or_path)
        load_weights(model, adapter_gguf_loader, adapter_gguf=True)
        model.train()
    else:
        inject_lora_layer(model, load_adapter_name_or_path)
        adapter_loader = SafeTensorLoader(load_adapter_name_or_path)
        device = next(model.parameters()).device
        for key in adapter_loader.tensor_file_map.keys():
            try:
                tensor = adapter_loader.load_tensor(key, device=device)
                # Remap PEFT checkpoint names onto the injected module names,
                # e.g. "base_model.model.X.weight" -> "X.default.weight".
                model_key = key.replace("base_model.model.", "")
                model_key = model_key.replace(".weight", ".default.weight")
                # Collapse the double suffix the previous replace can produce.
                model_key = model_key.replace(".default.default.weight", ".default.weight")
                param = model.get_parameter(model_key)
                param.data.copy_(tensor.data)
                print(f"Loaded adapter weight: {key} -> {model_key}")
            except AttributeError:
                print(f"Skipping {key}: not a model parameter")
            except KeyError:
                print(f"Key not found in model: {model_key} (original: {key})")
    return model
def load_kt_pretrained_model(config: "PretrainedConfig", model_args: "ModelArguments") -> "PreTrainedModel":
+ r"""Optionally load pretrained model with KTransformers. Used in training."""
custom_models = {
"DeepseekV2ForCausalLM": DeepseekV2ForCausalLM,
"DeepseekV3ForCausalLM": DeepseekV3ForCausalLM,
@@ -114,12 +115,14 @@
def get_kt_peft_model(model: "PreTrainedModel", peft_kwargs: dict[str, Any]) -> "PreTrainedModel":
+ r"""Get the peft model for the pretrained model with KTransformers. Used in training."""
from ktransformers.sft.peft_utils.mapping import get_peft_model
return get_peft_model(model, peft_kwargs)
def load_kt_peft_model(model_args: "ModelArguments", model: "PreTrainedModel") -> "PreTrainedModel":
+ r"""Load peft model with KTransformers. Used in both training and inference."""
load_adapter_name_or_path = model_args.adapter_name_or_path[0]
if load_adapter_name_or_path.endswith(".gguf"):
inject_lora_layer(model, load_adapter_name_or_path)
@@ -148,4 +151,4 @@ except KeyError:
print(f"Key not found in model: {model_key} (original: {key})")
- return model+ return model
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/model/model_utils/ktransformers.py |
Write docstrings for utility functions | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's PEFT library.
# https://github.com/huggingface/peft/blob/v0.10.0/src/peft/peft_model.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import os
import socket
from typing import TYPE_CHECKING, Any, Literal, Optional, Union
import torch
import torch.distributed as dist
import transformers.dynamic_module_utils
from huggingface_hub.utils import WeakFileLock
from transformers import InfNanRemoveLogitsProcessor, LogitsProcessorList
from transformers.dynamic_module_utils import get_relative_imports
from transformers.utils import (
is_torch_bf16_gpu_available,
is_torch_cuda_available,
is_torch_mps_available,
is_torch_npu_available,
is_torch_xpu_available,
)
from transformers.utils.versions import require_version
from . import logging
# fp16 compute requires an NPU or CUDA device.
_is_fp16_available = is_torch_npu_available() or is_torch_cuda_available()
try:
    _is_bf16_available = is_torch_bf16_gpu_available() or (is_torch_npu_available() and torch.npu.is_bf16_supported())
except Exception:
    # Probing bf16 support can raise on some builds; treat any failure as "not available".
    _is_bf16_available = False
if TYPE_CHECKING:
from numpy.typing import NDArray
from ..hparams import ModelArguments
logger = logging.get_logger(__name__)
class AverageMeter:
    r"""Track the running average of a scalar metric.

    Attributes (all reset to zero by ``reset``):
        val: the most recently recorded value.
        sum: weighted sum of every recorded value.
        count: total weight recorded so far.
        avg: running average, ``sum / count``.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        r"""Clear all statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        r"""Record ``val`` with weight ``n`` and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def check_version(requirement: str, mandatory: bool = False) -> None:
    r"""Check that an installed package satisfies ``requirement``.

    Non-mandatory checks can be disabled via the ``DISABLE_VERSION_CHECK`` env
    flag; mandatory ones always run. Raises through ``require_version`` on mismatch.
    """
    if not mandatory and is_env_enabled("DISABLE_VERSION_CHECK"):
        logger.warning_rank0_once("Version checking has been disabled, may lead to unexpected behaviors.")
        return

    # Packages that must be built against the local torch need --no-build-isolation.
    needs_build_isolation_off = "gptmodel" in requirement or "autoawq" in requirement
    if needs_build_isolation_off:
        pip_command = f"pip install {requirement} --no-build-isolation"
    else:
        pip_command = f"pip install {requirement}"

    if mandatory:
        hint = f"To fix: run `{pip_command}`."
    else:
        hint = f"To fix: run `{pip_command}` or set `DISABLE_VERSION_CHECK=1` to skip this check."

    require_version(requirement, hint)
def check_dependencies() -> None:
    r"""Verify that the core training dependencies fall inside the supported version ranges."""
    requirements = (
        "transformers>=4.51.0,<=5.2.0",
        "datasets>=2.16.0,<=4.0.0",
        "accelerate>=1.3.0,<=1.11.0",
        "peft>=0.18.0,<=0.18.1",
        "trl>=0.18.0,<=0.24.0",
    )
    for requirement in requirements:
        check_version(requirement)
def calculate_tps(dataset: list[dict[str, Any]], metrics: dict[str, float], stage: Literal["sft", "rm"]) -> float:
    r"""Compute effective tokens per second over the whole run.

    Counts label-bearing tokens ("input_ids" for sft; chosen + rejected ids for rm),
    scales by the epoch count and divides by the runtime. When distributed training
    is initialized the result is divided by the world size (per-worker rate).
    """
    if stage == "sft":
        token_count = sum(len(sample["input_ids"]) for sample in dataset)
    elif stage == "rm":
        token_count = sum(len(sample["chosen_input_ids"]) + len(sample["rejected_input_ids"]) for sample in dataset)
    else:
        token_count = 0

    tps = token_count * metrics["epoch"] / metrics["train_runtime"]
    if dist.is_initialized():
        return tps / dist.get_world_size()

    return tps
def count_parameters(model: "torch.nn.Module") -> tuple[int, int]:
    r"""Return the (trainable, total) parameter counts of ``model``.

    Two special parameter kinds are handled:
    - DeepSpeed ZeRO-3 placeholders report ``numel() == 0``; the real size lives on ``ds_numel``.
    - bitsandbytes ``Params4bit`` pack two 4-bit weights per stored element; scale by the storage itemsize.
    """
    trainable, total = 0, 0
    for weight in model.parameters():
        count = weight.numel()
        if count == 0 and hasattr(weight, "ds_numel"):  # DeepSpeed ZeRO-3 empty-initialized weight
            count = weight.ds_numel

        if weight.__class__.__name__ == "Params4bit":
            if hasattr(weight, "quant_storage") and hasattr(weight.quant_storage, "itemsize"):
                storage_size = weight.quant_storage.itemsize
            elif hasattr(weight, "element_size"):  # fallback for older pytorch versions
                storage_size = weight.element_size()
            else:
                storage_size = 1

            count = count * 2 * storage_size

        total += count
        if weight.requires_grad:
            trainable += count

    return trainable, total
def get_current_device() -> "torch.device":
    r"""Return the accelerator device assigned to this process (indexed by ``LOCAL_RANK``), or CPU."""
    local_rank = os.getenv("LOCAL_RANK", "0")
    if is_torch_xpu_available():
        return torch.device(f"xpu:{local_rank}")
    if is_torch_npu_available():
        return torch.device(f"npu:{local_rank}")
    if is_torch_mps_available():
        return torch.device(f"mps:{local_rank}")
    if is_torch_cuda_available():
        return torch.device(f"cuda:{local_rank}")
    return torch.device("cpu")
def get_device_name() -> str:
    r"""Return a short name for the available accelerator family: "xpu", "npu", "mps", "gpu" or "cpu"."""
    # Ordered by detection priority; the first available backend wins.
    backends = (
        (is_torch_xpu_available, "xpu"),
        (is_torch_npu_available, "npu"),
        (is_torch_mps_available, "mps"),
        (is_torch_cuda_available, "gpu"),
    )
    for available, name in backends:
        if available():
            return name

    return "cpu"
def get_torch_device():
    r"""Return the ``torch.<backend>`` namespace for the detected device, defaulting to ``torch.cuda``."""
    namespace = get_device_name()
    if namespace == "gpu":  # the torch namespace for GPUs is "cuda"
        namespace = "cuda"

    try:
        return getattr(torch, namespace)
    except AttributeError:
        logger.warning_rank0(f"Device namespace '{namespace}' not found in torch, try to load torch.cuda.")
        return torch.cuda
def get_device_count() -> int:
    r"""Return how many accelerator devices are visible; 0 on CPU-only machines."""
    if is_torch_xpu_available():
        return torch.xpu.device_count()
    if is_torch_npu_available():
        return torch.npu.device_count()
    if is_torch_mps_available():
        return torch.mps.device_count()
    if is_torch_cuda_available():
        return torch.cuda.device_count()
    return 0
def get_logits_processor() -> "LogitsProcessorList":
    r"""Return a logits processor list that strips inf/nan logits during generation."""
    return LogitsProcessorList([InfNanRemoveLogitsProcessor()])
def get_current_memory() -> tuple[int, int]:
    r"""Return (free, total) device memory in bytes; (0, -1) without an accelerator.

    NOTE(review): the MPS branch actually reports (currently allocated,
    recommended max) rather than (free, total) — callers should treat the pair loosely.
    """
    if is_torch_xpu_available():
        return torch.xpu.mem_get_info()
    if is_torch_npu_available():
        return torch.npu.mem_get_info()
    if is_torch_mps_available():
        return torch.mps.current_allocated_memory(), torch.mps.recommended_max_memory()
    if is_torch_cuda_available():
        return torch.cuda.mem_get_info()
    return 0, -1
def get_peak_memory() -> tuple[int, int]:
    r"""Return peak (allocated, reserved) device memory in bytes; -1 marks an unavailable figure."""
    if is_torch_xpu_available():
        return torch.xpu.max_memory_allocated(), torch.xpu.max_memory_reserved()
    if is_torch_npu_available():
        return torch.npu.max_memory_allocated(), torch.npu.max_memory_reserved()
    if is_torch_mps_available():
        # MPS exposes no reserved-memory counter here.
        return torch.mps.current_allocated_memory(), -1
    if is_torch_cuda_available():
        return torch.cuda.max_memory_allocated(), torch.cuda.max_memory_reserved()
    return 0, -1
def has_tokenized_data(path: "os.PathLike") -> bool:
    r"""Return True when ``path`` is a non-empty directory (i.e. holds a saved tokenized dataset)."""
    if not os.path.isdir(path):
        return False

    return len(os.listdir(path)) > 0
def infer_optim_dtype(model_dtype: Optional["torch.dtype"]) -> "torch.dtype":
    r"""Pick the best compute dtype: bf16 when supported (or unspecified), else fp16, else fp32."""
    wants_bf16 = model_dtype == torch.bfloat16 or model_dtype is None
    if _is_bf16_available and wants_bf16:
        return torch.bfloat16

    if _is_fp16_available:
        return torch.float16

    return torch.float32
def is_accelerator_available() -> bool:
    r"""Return True when any supported accelerator backend (XPU/NPU/MPS/CUDA) is usable."""
    backends = (is_torch_xpu_available, is_torch_npu_available, is_torch_mps_available, is_torch_cuda_available)
    return any(backend() for backend in backends)
def is_env_enabled(env_var: str, default: str = "0") -> bool:
    r"""Return True when ``env_var`` holds a truthy flag ("true"/"y"/"1", case-insensitive)."""
    value = os.getenv(env_var, default)
    return value.lower() in ("true", "y", "1")
def numpify(inputs: Union["NDArray", "torch.Tensor"]) -> "NDArray":
    r"""Convert a torch tensor to a numpy array; numpy input is returned unchanged.

    bfloat16 tensors are upcast to float32 first, since numpy has no native bfloat16.
    """
    if not isinstance(inputs, torch.Tensor):
        return inputs

    tensor = inputs.cpu()
    if tensor.dtype == torch.bfloat16:  # numpy does not support bfloat16 until 1.21.4
        tensor = tensor.to(torch.float32)

    return tensor.numpy()
def skip_check_imports() -> None:
    r"""Replace transformers' dynamic-module import check with the relative-import resolver.

    Skipped when the ``FORCE_CHECK_IMPORTS`` env flag is enabled.
    """
    if is_env_enabled("FORCE_CHECK_IMPORTS"):
        return

    transformers.dynamic_module_utils.check_imports = get_relative_imports
def torch_gc() -> None:
    r"""Run Python garbage collection, then empty the cache of the active accelerator backend."""
    gc.collect()
    if is_torch_xpu_available():
        torch.xpu.empty_cache()
        return
    if is_torch_npu_available():
        torch.npu.empty_cache()
        return
    if is_torch_mps_available():
        torch.mps.empty_cache()
        return
    if is_torch_cuda_available():
        torch.cuda.empty_cache()
def try_download_model_from_other_hub(model_args: "ModelArguments") -> str:
    r"""Resolve ``model_name_or_path`` through the ModelScope / openMind hubs when enabled.

    Returns the local snapshot path after downloading; the original value is
    returned unchanged when neither hub is enabled or the path already exists locally.
    """
    if (not use_modelscope() and not use_openmind()) or os.path.exists(model_args.model_name_or_path):
        return model_args.model_name_or_path

    if use_modelscope():
        check_version("modelscope>=1.14.0", mandatory=True)
        from modelscope import snapshot_download  # type: ignore
        from modelscope.hub.api import HubApi  # type: ignore

        if model_args.ms_hub_token:
            api = HubApi()
            api.login(model_args.ms_hub_token)

        # ModelScope names its default branch "master", not "main".
        revision = "master" if model_args.model_revision == "main" else model_args.model_revision
        # File lock serializes concurrent downloads on this machine.
        with WeakFileLock(os.path.abspath(os.path.expanduser("~/.cache/llamafactory/modelscope.lock"))):
            model_path = snapshot_download(
                model_args.model_name_or_path,
                revision=revision,
                cache_dir=model_args.cache_dir,
            )

        return model_path

    if use_openmind():
        check_version("openmind>=0.8.0", mandatory=True)
        from openmind.utils.hub import snapshot_download  # type: ignore

        with WeakFileLock(os.path.abspath(os.path.expanduser("~/.cache/llamafactory/openmind.lock"))):
            model_path = snapshot_download(
                model_args.model_name_or_path,
                revision=model_args.model_revision,
                cache_dir=model_args.cache_dir,
            )

        return model_path
def use_modelscope() -> bool:
    r"""Whether to download assets from the ModelScope hub (``USE_MODELSCOPE_HUB`` env flag)."""
    return is_env_enabled("USE_MODELSCOPE_HUB")
def use_openmind() -> bool:
    r"""Whether to download assets from the openMind hub (``USE_OPENMIND_HUB`` env flag)."""
    return is_env_enabled("USE_OPENMIND_HUB")
def use_ray() -> bool:
    r"""Whether Ray-based launching is enabled (``USE_RAY`` env flag)."""
    return is_env_enabled("USE_RAY")
def use_kt() -> bool:
    r"""Whether the KTransformers backend is enabled (``USE_KT`` env flag)."""
    return is_env_enabled("USE_KT")
def find_available_port() -> int:
    r"""Bind an ephemeral TCP port, release it, and return its number.

    Note: the port is only guaranteed free at probe time; another process may
    grab it before the caller binds it again.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
        probe.bind(("", 0))
        return probe.getsockname()[1]
def fix_proxy(ipv6_enabled: bool = False) -> None:
    r"""Exempt local addresses from proxying; with ``ipv6_enabled``, also drop every proxy variable."""
    os.environ["no_proxy"] = "localhost,127.0.0.1,0.0.0.0"
    if not ipv6_enabled:
        return

    for name in ("http_proxy", "https_proxy", "all_proxy"):
        os.environ.pop(name, None)
        os.environ.pop(name.upper(), None)
class AverageMeter:
+ r"""Compute and store the average and current value."""
def __init__(self):
self.reset()
@@ -73,6 +74,7 @@
def check_version(requirement: str, mandatory: bool = False) -> None:
+ r"""Optionally check the package version."""
if is_env_enabled("DISABLE_VERSION_CHECK") and not mandatory:
logger.warning_rank0_once("Version checking has been disabled, may lead to unexpected behaviors.")
return
@@ -91,6 +93,7 @@
def check_dependencies() -> None:
+ r"""Check the version of the required packages."""
check_version("transformers>=4.51.0,<=5.2.0")
check_version("datasets>=2.16.0,<=4.0.0")
check_version("accelerate>=1.3.0,<=1.11.0")
@@ -99,6 +102,7 @@
def calculate_tps(dataset: list[dict[str, Any]], metrics: dict[str, float], stage: Literal["sft", "rm"]) -> float:
+ r"""Calculate effective tokens per second."""
effective_token_num = 0
for data in dataset:
if stage == "sft":
@@ -111,6 +115,7 @@
def count_parameters(model: "torch.nn.Module") -> tuple[int, int]:
+ r"""Return the number of trainable parameters and number of all parameters in the model."""
trainable_params, all_param = 0, 0
for param in model.parameters():
num_params = param.numel()
@@ -137,6 +142,7 @@
def get_current_device() -> "torch.device":
+ r"""Get the current available device."""
if is_torch_xpu_available():
device = "xpu:{}".format(os.getenv("LOCAL_RANK", "0"))
elif is_torch_npu_available():
@@ -152,6 +158,7 @@
def get_device_name() -> str:
+ r"""Get the name of available devices."""
if is_torch_xpu_available():
device = "xpu"
elif is_torch_npu_available():
@@ -167,6 +174,7 @@
def get_torch_device():
+ r"""Get the torch device namespace for the available devices."""
device_name = get_device_name()
device_name = "cuda" if device_name == "gpu" else device_name
try:
@@ -177,6 +185,7 @@
def get_device_count() -> int:
+ r"""Get the number of available devices."""
if is_torch_xpu_available():
return torch.xpu.device_count()
elif is_torch_npu_available():
@@ -190,12 +199,14 @@
def get_logits_processor() -> "LogitsProcessorList":
+ r"""Get logits processor that removes NaN and Inf logits."""
logits_processor = LogitsProcessorList()
logits_processor.append(InfNanRemoveLogitsProcessor())
return logits_processor
def get_current_memory() -> tuple[int, int]:
+ r"""Get the available and total memory for the current device (in Bytes)."""
if is_torch_xpu_available():
return torch.xpu.mem_get_info()
elif is_torch_npu_available():
@@ -209,6 +220,7 @@
def get_peak_memory() -> tuple[int, int]:
+ r"""Get the peak memory usage (allocated, reserved) for the current device (in Bytes)."""
if is_torch_xpu_available():
return torch.xpu.max_memory_allocated(), torch.xpu.max_memory_reserved()
elif is_torch_npu_available():
@@ -222,10 +234,12 @@
def has_tokenized_data(path: "os.PathLike") -> bool:
+ r"""Check if the path has a tokenized dataset."""
return os.path.isdir(path) and len(os.listdir(path)) > 0
def infer_optim_dtype(model_dtype: Optional["torch.dtype"]) -> "torch.dtype":
+ r"""Infer the optimal dtype according to the model_dtype and device compatibility."""
if _is_bf16_available and (model_dtype == torch.bfloat16 or model_dtype is None):
return torch.bfloat16
elif _is_fp16_available:
@@ -235,16 +249,19 @@
def is_accelerator_available() -> bool:
+ r"""Check if the accelerator is available."""
return (
is_torch_xpu_available() or is_torch_npu_available() or is_torch_mps_available() or is_torch_cuda_available()
)
def is_env_enabled(env_var: str, default: str = "0") -> bool:
+ r"""Check if the environment variable is enabled."""
return os.getenv(env_var, default).lower() in ["true", "y", "1"]
def numpify(inputs: Union["NDArray", "torch.Tensor"]) -> "NDArray":
+ r"""Cast a torch tensor or a numpy array to a numpy array."""
if isinstance(inputs, torch.Tensor):
inputs = inputs.cpu()
if inputs.dtype == torch.bfloat16: # numpy does not support bfloat16 until 1.21.4
@@ -256,11 +273,13 @@
def skip_check_imports() -> None:
+ r"""Avoid flash attention import error in custom model files."""
if not is_env_enabled("FORCE_CHECK_IMPORTS"):
transformers.dynamic_module_utils.check_imports = get_relative_imports
def torch_gc() -> None:
+ r"""Collect the device memory."""
gc.collect()
if is_torch_xpu_available():
torch.xpu.empty_cache()
@@ -326,6 +345,7 @@
def find_available_port() -> int:
+ r"""Find an available port on the local machine."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
port = sock.getsockname()[1]
@@ -334,6 +354,7 @@
def fix_proxy(ipv6_enabled: bool = False) -> None:
+ r"""Fix proxy settings for gradio ui."""
os.environ["no_proxy"] = "localhost,127.0.0.1,0.0.0.0"
if ipv6_enabled:
os.environ.pop("http_proxy", None)
@@ -341,4 +362,4 @@ os.environ.pop("https_proxy", None)
os.environ.pop("HTTPS_PROXY", None)
os.environ.pop("all_proxy", None)
- os.environ.pop("ALL_PROXY", None)+ os.environ.pop("ALL_PROXY", None)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/extras/misc.py |
Add docstrings to clarify complex logic | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from abc import abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Union
from ..extras import logging
from .data_utils import Role
if TYPE_CHECKING:
from datasets import Dataset, IterableDataset
from transformers import Seq2SeqTrainingArguments
from ..hparams import DataArguments
from .mm_plugin import AudioInput, ImageInput, VideoInput
from .parser import DatasetAttr
MediaType = Union[ImageInput, VideoInput, AudioInput]
logger = logging.get_logger(__name__)
@dataclass
class DatasetConverter:
    r"""Base converter from a raw dataset example to the project-standard fields.

    Subclasses implement ``__call__`` and return a dict with the keys
    ``_prompt``, ``_response``, ``_system``, ``_tools``, ``_images``,
    ``_videos`` and ``_audios``.
    """

    # Column names / flags describing the raw dataset layout.
    dataset_attr: "DatasetAttr"
    # Data arguments; only ``media_dir`` is read here.
    data_args: "DataArguments"

    def _find_medias(self, medias: Union["MediaType", list["MediaType"], None]) -> list["MediaType"] | None:
        r"""Normalize media inputs to a list and resolve local file paths.

        Returns ``None`` for missing or empty input. For datasets loaded from
        "script"/"file", string entries (or nested lists of frame paths) are
        prefixed with ``media_dir`` when the resulting file exists; otherwise the
        original entry is kept and a warning is logged once.
        """
        if medias is None:
            return None
        elif not isinstance(medias, list):
            medias = [medias]  # wrap a single media item into a list
        elif len(medias) == 0:
            return None
        else:
            # Shallow copy: protects the caller's outer list, but nested frame
            # lists below are still edited in place.
            medias = medias[:]

        if self.dataset_attr.load_from in ["script", "file"]:
            if isinstance(medias[0], str):
                for i in range(len(medias)):
                    media_path = os.path.join(self.data_args.media_dir, medias[i])
                    if os.path.isfile(media_path):
                        medias[i] = media_path
                    else:
                        logger.warning_rank0_once(
                            f"Media {medias[i]} does not exist in `media_dir`. Use original path."
                        )
            elif isinstance(medias[0], list):  # for processed video frames
                # medias is a list of lists, e.g., [[frame1.jpg, frame2.jpg], [frame3.jpg, frame4.jpg]]
                for i in range(len(medias)):
                    for j in range(len(medias[i])):
                        media_path = os.path.join(self.data_args.media_dir, medias[i][j])
                        if os.path.isfile(media_path):
                            medias[i][j] = media_path
                        else:
                            logger.warning_rank0_once(
                                f"Media {medias[i][j]} does not exist in `media_dir`. Use original path."
                            )

        return medias

    @abstractmethod
    def __call__(self, example: dict[str, Any]) -> dict[str, Any]:
        r"""Convert a single raw example to the standard fields."""
        ...
@dataclass
class AlpacaDatasetConverter(DatasetConverter):
    r"""Convert alpaca-format examples (prompt/query/response columns) to the standard fields."""

    def __call__(self, example: dict[str, Any]) -> dict[str, Any]:
        r"""Map one alpaca example to ``_prompt``/``_response`` plus system/tools/media fields."""
        prompt = []
        # Replay multi-turn history (list of (prompt, response) pairs) ahead of the current turn.
        if self.dataset_attr.history and isinstance(example[self.dataset_attr.history], list):
            for old_prompt, old_response in example[self.dataset_attr.history]:
                prompt.append({"role": Role.USER.value, "content": old_prompt})
                prompt.append({"role": Role.ASSISTANT.value, "content": old_response})

        query = []
        if self.dataset_attr.prompt and example[self.dataset_attr.prompt]:
            query.append(example[self.dataset_attr.prompt])

        if self.dataset_attr.query and example[self.dataset_attr.query]:
            query.append(example[self.dataset_attr.query])

        prompt.append({"role": Role.USER.value, "content": "\n".join(query)})  # "prompt\nquery"

        if self.dataset_attr.kto_tag and isinstance(example[self.dataset_attr.kto_tag], bool):  # kto example
            response = [{"role": Role.ASSISTANT.value, "content": example[self.dataset_attr.response]}]
            # Pair the real response with an empty one; its position encodes the kto tag
            # (real-first means desirable, empty-first means undesirable).
            if example[self.dataset_attr.kto_tag]:
                response = response + [{"role": Role.ASSISTANT.value, "content": ""}]
            else:
                response = [{"role": Role.ASSISTANT.value, "content": ""}] + response
        elif (
            self.dataset_attr.ranking
            and isinstance(example[self.dataset_attr.chosen], str)
            and isinstance(example[self.dataset_attr.rejected], str)
        ):  # pairwise example: [chosen, rejected]
            response = [
                {"role": Role.ASSISTANT.value, "content": example[self.dataset_attr.chosen]},
                {"role": Role.ASSISTANT.value, "content": example[self.dataset_attr.rejected]},
            ]
        elif self.dataset_attr.response and isinstance(example[self.dataset_attr.response], str):  # normal example
            response = [{"role": Role.ASSISTANT.value, "content": example[self.dataset_attr.response]}]
        else:  # unsupervised: prompt only
            response = []

        output = {
            "_prompt": prompt,
            "_response": response,
            "_system": example[self.dataset_attr.system] if self.dataset_attr.system else "",
            "_tools": example[self.dataset_attr.tools] if self.dataset_attr.tools else "",
            "_images": self._find_medias(example[self.dataset_attr.images]) if self.dataset_attr.images else None,
            "_videos": self._find_medias(example[self.dataset_attr.videos]) if self.dataset_attr.videos else None,
            "_audios": self._find_medias(example[self.dataset_attr.audios]) if self.dataset_attr.audios else None,
        }
        return output
@dataclass
class SharegptDatasetConverter(DatasetConverter):
def __call__(self, example: dict[str, Any]) -> dict[str, Any]:
tag_mapping = {
self.dataset_attr.user_tag: Role.USER.value,
self.dataset_attr.assistant_tag: Role.ASSISTANT.value,
self.dataset_attr.observation_tag: Role.OBSERVATION.value,
self.dataset_attr.function_tag: Role.FUNCTION.value,
self.dataset_attr.system_tag: Role.SYSTEM.value,
}
odd_tags = (self.dataset_attr.user_tag, self.dataset_attr.observation_tag)
even_tags = (self.dataset_attr.assistant_tag, self.dataset_attr.function_tag)
accept_tags = (odd_tags, even_tags)
messages = example[self.dataset_attr.messages]
if (
self.dataset_attr.system_tag
and len(messages) != 0
and messages[0][self.dataset_attr.role_tag] == self.dataset_attr.system_tag
):
system = messages[0][self.dataset_attr.content_tag]
messages = messages[1:]
else:
system = example[self.dataset_attr.system] if self.dataset_attr.system else ""
aligned_messages = []
broken_data = False
for turn_idx, message in enumerate(messages):
if message[self.dataset_attr.role_tag] not in accept_tags[turn_idx % 2]:
logger.warning_rank0(f"Invalid role tag in {messages}.")
broken_data = True
break
aligned_messages.append(
{
"role": tag_mapping[message[self.dataset_attr.role_tag]],
"content": message[self.dataset_attr.content_tag],
}
)
if (not self.dataset_attr.ranking and len(aligned_messages) % 2 != 0) or (
self.dataset_attr.ranking and len(aligned_messages) % 2 == 0
):
logger.warning_rank0(f"Invalid message count in {messages}.")
broken_data = True
if broken_data:
logger.warning_rank0("Skipping this abnormal example.")
prompt, response = [], []
elif self.dataset_attr.kto_tag and isinstance(example[self.dataset_attr.kto_tag], bool): # kto example
prompt = aligned_messages[:-1]
response = aligned_messages[-1:]
if example[self.dataset_attr.kto_tag]:
response = response + [{"role": Role.ASSISTANT.value, "content": ""}]
else:
response = [{"role": Role.ASSISTANT.value, "content": ""}] + response
elif (
self.dataset_attr.ranking
and isinstance(example[self.dataset_attr.chosen], dict)
and isinstance(example[self.dataset_attr.rejected], dict)
): # pairwise example
chosen = example[self.dataset_attr.chosen]
rejected = example[self.dataset_attr.rejected]
if (
chosen[self.dataset_attr.role_tag] not in accept_tags[-1]
or rejected[self.dataset_attr.role_tag] not in accept_tags[-1]
):
logger.warning_rank0(f"Invalid role tag in {[chosen, rejected]}.")
broken_data = True
prompt = aligned_messages
response = [
{
"role": tag_mapping[chosen[self.dataset_attr.role_tag]],
"content": chosen[self.dataset_attr.content_tag],
},
{
"role": tag_mapping[rejected[self.dataset_attr.role_tag]],
"content": rejected[self.dataset_attr.content_tag],
},
]
else: # normal example
prompt = aligned_messages[:-1]
response = aligned_messages[-1:]
output = {
"_prompt": prompt,
"_response": response,
"_system": system,
"_tools": example[self.dataset_attr.tools] if self.dataset_attr.tools else "",
"_images": self._find_medias(example[self.dataset_attr.images]) if self.dataset_attr.images else None,
"_videos": self._find_medias(example[self.dataset_attr.videos]) if self.dataset_attr.videos else None,
"_audios": self._find_medias(example[self.dataset_attr.audios]) if self.dataset_attr.audios else None,
}
return output
@dataclass
class OpenAIDatasetConverter(DatasetConverter):
def __call__(self, example: dict[str, Any]) -> dict[str, Any]:
tag_mapping = {
self.dataset_attr.user_tag: Role.USER.value,
self.dataset_attr.assistant_tag: Role.ASSISTANT.value,
self.dataset_attr.observation_tag: Role.OBSERVATION.value,
self.dataset_attr.function_tag: Role.FUNCTION.value,
self.dataset_attr.system_tag: Role.SYSTEM.value,
}
messages = example[self.dataset_attr.messages]
if (
self.dataset_attr.system_tag
and len(messages) != 0
and messages[0][self.dataset_attr.role_tag] == self.dataset_attr.system_tag
):
system = messages[0][self.dataset_attr.content_tag]
messages = messages[1:]
else:
system = example.get(self.dataset_attr.system, "") if self.dataset_attr.system else ""
aligned_messages = []
tool_responses = []
broken_data = False
for turn_idx, message in enumerate(messages):
role = message[self.dataset_attr.role_tag]
content = message[self.dataset_attr.content_tag]
if role in [self.dataset_attr.assistant_tag, self.dataset_attr.function_tag]:
if "tool_calls" in message and len(message["tool_calls"]) > 0:
tool_calls_list = [tool["function"] for tool in message["tool_calls"]]
content = json.dumps(tool_calls_list, ensure_ascii=False)
role = self.dataset_attr.function_tag
if role == self.dataset_attr.observation_tag:
tool_responses.append(content)
continue
elif len(tool_responses) > 0:
_content = "\n</tool_response>\n<tool_response>\n".join(tool_responses)
aligned_messages.append(
{
"role": Role.OBSERVATION.value,
"content": _content,
}
)
tool_responses = []
aligned_messages.append(
{
"role": tag_mapping[role],
"content": content,
}
)
odd_tags = (Role.USER.value, Role.OBSERVATION.value)
even_tags = (Role.ASSISTANT.value, Role.FUNCTION.value)
accept_tags = (odd_tags, even_tags)
for turn_idx, message in enumerate(aligned_messages):
if message["role"] not in accept_tags[turn_idx % 2]:
logger.warning_rank0(f"Invalid role tag in {messages}.")
broken_data = True
break
if (not self.dataset_attr.ranking and len(aligned_messages) % 2 != 0) or (
self.dataset_attr.ranking and len(aligned_messages) % 2 == 0
):
logger.warning_rank0(f"Invalid message count in {messages}.")
broken_data = True
if broken_data:
logger.warning_rank0("Skipping this abnormal example.")
prompt, response = [], []
elif self.dataset_attr.kto_tag and isinstance(example[self.dataset_attr.kto_tag], bool): # kto example
prompt = aligned_messages[:-1]
response = aligned_messages[-1:]
if example[self.dataset_attr.kto_tag]:
response = response + [{"role": Role.ASSISTANT.value, "content": ""}]
else:
response = [{"role": Role.ASSISTANT.value, "content": ""}] + response
elif (
self.dataset_attr.ranking
and isinstance(example[self.dataset_attr.chosen], dict)
and isinstance(example[self.dataset_attr.rejected], dict)
): # pairwise example
chosen = example[self.dataset_attr.chosen]
rejected = example[self.dataset_attr.rejected]
if (
chosen[self.dataset_attr.role_tag] not in accept_tags[-1]
or rejected[self.dataset_attr.role_tag] not in accept_tags[-1]
):
logger.warning_rank0(f"Invalid role tag in {[chosen, rejected]}.")
broken_data = True
prompt = aligned_messages
response = [
{
"role": tag_mapping[chosen[self.dataset_attr.role_tag]],
"content": chosen[self.dataset_attr.content_tag],
},
{
"role": tag_mapping[rejected[self.dataset_attr.role_tag]],
"content": rejected[self.dataset_attr.content_tag],
},
]
else: # normal example
prompt = aligned_messages[:-1]
response = aligned_messages[-1:]
tools = example.get(self.dataset_attr.tools, "") if self.dataset_attr.tools else ""
if isinstance(tools, dict) or isinstance(tools, list):
tools = json.dumps(tools, ensure_ascii=False)
short_system_prompt = "detailed thinking off"
if not system:
if not tools:
system = short_system_prompt
else:
pass
else:
if not tools:
if "detailed thinking on" in system or "detailed thinking off" in system:
pass
else:
system += "\n" + short_system_prompt
else:
system += "\n"
output = {
"_prompt": prompt,
"_response": response,
"_system": system,
"_tools": tools,
"_images": self._find_medias(example[self.dataset_attr.images]) if self.dataset_attr.images else None,
"_videos": self._find_medias(example[self.dataset_attr.videos]) if self.dataset_attr.videos else None,
"_audios": self._find_medias(example[self.dataset_attr.audios]) if self.dataset_attr.audios else None,
}
return output
DATASET_CONVERTERS = {
"alpaca": AlpacaDatasetConverter,
"sharegpt": SharegptDatasetConverter,
"openai": OpenAIDatasetConverter,
}
def register_dataset_converter(name: str, dataset_converter: type["DatasetConverter"]) -> None:
if name in DATASET_CONVERTERS:
raise ValueError(f"Dataset converter {name} already exists.")
DATASET_CONVERTERS[name] = dataset_converter
def get_dataset_converter(name: str, dataset_attr: "DatasetAttr", data_args: "DataArguments") -> "DatasetConverter":
if name not in DATASET_CONVERTERS:
raise ValueError(f"Dataset converter {name} not found.")
return DATASET_CONVERTERS[name](dataset_attr, data_args)
def align_dataset(
dataset: Union["Dataset", "IterableDataset"],
dataset_attr: "DatasetAttr",
data_args: "DataArguments",
training_args: "Seq2SeqTrainingArguments",
) -> Union["Dataset", "IterableDataset"]:
column_names = list(next(iter(dataset)).keys())
kwargs = {}
if not data_args.streaming:
kwargs = dict(
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=(not data_args.overwrite_cache) or (training_args.local_process_index != 0),
desc="Converting format of dataset",
)
dataset_converter = get_dataset_converter(dataset_attr.formatting, dataset_attr, data_args)
return dataset.map(
dataset_converter,
batched=False,
remove_columns=column_names,
**kwargs,
) | --- +++ @@ -41,6 +41,7 @@ data_args: "DataArguments"
def _find_medias(self, medias: Union["MediaType", list["MediaType"], None]) -> list["MediaType"] | None:
+ r"""Optionally concatenate media path to media dir when loading from local disk."""
if medias is None:
return None
elif not isinstance(medias, list):
@@ -76,6 +77,7 @@
@abstractmethod
def __call__(self, example: dict[str, Any]) -> dict[str, Any]:
+ r"""Convert a single example in the dataset to the standard format."""
...
@@ -373,6 +375,7 @@
def register_dataset_converter(name: str, dataset_converter: type["DatasetConverter"]) -> None:
+ r"""Register a new dataset converter."""
if name in DATASET_CONVERTERS:
raise ValueError(f"Dataset converter {name} already exists.")
@@ -380,6 +383,7 @@
def get_dataset_converter(name: str, dataset_attr: "DatasetAttr", data_args: "DataArguments") -> "DatasetConverter":
+ r"""Get a dataset converter."""
if name not in DATASET_CONVERTERS:
raise ValueError(f"Dataset converter {name} not found.")
@@ -392,6 +396,17 @@ data_args: "DataArguments",
training_args: "Seq2SeqTrainingArguments",
) -> Union["Dataset", "IterableDataset"]:
+ r"""Align the dataset to a specific format.
+
+ Aligned dataset:
+ _prompt: [{"role": "user", "content": "..."}] * (2T - 1)
+ _response: [{"role": "assistant", "content": "..."}] * N (N > 1 for ranking dataset)
+ _system: "..."
+ _tools: "..."
+ _images: []
+ _videos: []
+ _audios: []
+ """
column_names = list(next(iter(dataset)).keys())
kwargs = {}
if not data_args.streaming:
@@ -407,4 +422,4 @@ batched=False,
remove_columns=column_names,
**kwargs,
- )+ )
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/data/converter.py |
Write reusable docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Optional
from ...extras import logging
from ...extras.misc import get_current_device
if TYPE_CHECKING:
from transformers import PretrainedConfig, PreTrainedModel
from ...hparams import FinetuningArguments, ModelArguments
logger = logging.get_logger(__name__)
def _get_unsloth_kwargs(
config: "PretrainedConfig",
model_name_or_path: str,
model_args: "ModelArguments",
finetuning_args: "FinetuningArguments",
) -> dict[str, Any]:
return {
"model_name": model_name_or_path,
"max_seq_length": model_args.model_max_length or 4096,
"dtype": model_args.compute_dtype,
"load_in_4bit": model_args.quantization_bit == 4,
"token": model_args.hf_hub_token,
"full_finetuning": finetuning_args.finetuning_type == "full",
"device_map": {"": get_current_device()},
"rope_scaling": getattr(config, "rope_scaling", None),
"fix_tokenizer": False,
"trust_remote_code": model_args.trust_remote_code,
"use_gradient_checkpointing": "unsloth",
}
def load_unsloth_pretrained_model(
config: "PretrainedConfig", model_args: "ModelArguments", finetuning_args: "FinetuningArguments"
) -> Optional["PreTrainedModel"]:
from unsloth import FastLanguageModel # type: ignore
unsloth_kwargs = _get_unsloth_kwargs(config, model_args.model_name_or_path, model_args, finetuning_args)
try:
model, _ = FastLanguageModel.from_pretrained(**unsloth_kwargs)
except NotImplementedError:
logger.warning_rank0("Unsloth does not support model type {}.".format(getattr(config, "model_type", None)))
model = None
model_args.use_unsloth = False
return model
def get_unsloth_peft_model(
model: "PreTrainedModel", model_args: "ModelArguments", peft_kwargs: dict[str, Any]
) -> "PreTrainedModel":
from unsloth import FastLanguageModel # type: ignore
unsloth_peft_kwargs = {
"model": model,
"max_seq_length": model_args.model_max_length,
"use_gradient_checkpointing": "unsloth",
}
return FastLanguageModel.get_peft_model(**peft_kwargs, **unsloth_peft_kwargs)
def load_unsloth_peft_model(
config: "PretrainedConfig",
model_args: "ModelArguments",
finetuning_args: "FinetuningArguments",
is_trainable: bool,
) -> "PreTrainedModel":
from unsloth import FastLanguageModel # type: ignore
unsloth_kwargs = _get_unsloth_kwargs(config, model_args.adapter_name_or_path[0], model_args, finetuning_args)
try:
if not is_trainable:
unsloth_kwargs["use_gradient_checkpointing"] = False
model, _ = FastLanguageModel.from_pretrained(**unsloth_kwargs)
except NotImplementedError:
raise ValueError("Unsloth does not support model type {}.".format(getattr(config, "model_type", None)))
if not is_trainable:
FastLanguageModel.for_inference(model)
return model | --- +++ @@ -51,6 +51,7 @@ def load_unsloth_pretrained_model(
config: "PretrainedConfig", model_args: "ModelArguments", finetuning_args: "FinetuningArguments"
) -> Optional["PreTrainedModel"]:
+ r"""Optionally load pretrained model with unsloth. Used in training."""
from unsloth import FastLanguageModel # type: ignore
unsloth_kwargs = _get_unsloth_kwargs(config, model_args.model_name_or_path, model_args, finetuning_args)
@@ -67,6 +68,7 @@ def get_unsloth_peft_model(
model: "PreTrainedModel", model_args: "ModelArguments", peft_kwargs: dict[str, Any]
) -> "PreTrainedModel":
+ r"""Get the peft model for the pretrained model with unsloth. Used in training."""
from unsloth import FastLanguageModel # type: ignore
unsloth_peft_kwargs = {
@@ -83,6 +85,7 @@ finetuning_args: "FinetuningArguments",
is_trainable: bool,
) -> "PreTrainedModel":
+ r"""Load peft model with unsloth. Used in both training and inference."""
from unsloth import FastLanguageModel # type: ignore
unsloth_kwargs = _get_unsloth_kwargs(config, model_args.adapter_name_or_path[0], model_args, finetuning_args)
@@ -97,4 +100,4 @@ if not is_trainable:
FastLanguageModel.for_inference(model)
- return model+ return model
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/model/model_utils/unsloth.py |
Add docstrings to incomplete code | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import asdict, dataclass, field
from typing import Any, Literal
@dataclass
class FreezeArguments:
freeze_trainable_layers: int = field(
default=2,
metadata={
"help": (
"The number of trainable layers for freeze (partial-parameter) fine-tuning. "
"Positive numbers mean the last n layers are set as trainable, "
"negative numbers mean the first n layers are set as trainable."
)
},
)
freeze_trainable_modules: str = field(
default="all",
metadata={
"help": (
"Name(s) of trainable modules for freeze (partial-parameter) fine-tuning. "
"Use commas to separate multiple modules. "
"Use `all` to specify all the available modules."
)
},
)
freeze_extra_modules: str | None = field(
default=None,
metadata={
"help": (
"Name(s) of modules apart from hidden layers to be set as trainable "
"for freeze (partial-parameter) fine-tuning. "
"Use commas to separate multiple modules."
)
},
)
@dataclass
class LoraArguments:
additional_target: str | None = field(
default=None,
metadata={
"help": (
"Name(s) of modules apart from LoRA layers to be set as trainable "
"and saved in the final checkpoint. "
"Use commas to separate multiple modules."
)
},
)
lora_alpha: int | None = field(
default=None,
metadata={"help": "The scale factor for LoRA fine-tuning (default: lora_rank * 2)."},
)
lora_dropout: float = field(
default=0.0,
metadata={"help": "Dropout rate for the LoRA fine-tuning."},
)
lora_rank: int = field(
default=8,
metadata={"help": "The intrinsic dimension for LoRA fine-tuning."},
)
lora_target: str = field(
default="all",
metadata={
"help": (
"Name(s) of target modules to apply LoRA. "
"Use commas to separate multiple modules. "
"Use `all` to specify all the linear modules."
)
},
)
loraplus_lr_ratio: float | None = field(
default=None,
metadata={"help": "LoRA plus learning rate ratio (lr_B / lr_A)."},
)
loraplus_lr_embedding: float = field(
default=1e-6,
metadata={"help": "LoRA plus learning rate for lora embedding layers."},
)
use_rslora: bool = field(
default=False,
metadata={"help": "Whether or not to use the rank stabilization scaling factor for LoRA layer."},
)
use_dora: bool = field(
default=False,
metadata={"help": "Whether or not to use the weight-decomposed lora method (DoRA)."},
)
pissa_init: bool = field(
default=False,
metadata={"help": "Whether or not to initialize a PiSSA adapter."},
)
pissa_iter: int = field(
default=16,
metadata={"help": "The number of iteration steps performed by FSVD in PiSSA. Use -1 to disable it."},
)
pissa_convert: bool = field(
default=False,
metadata={"help": "Whether or not to convert the PiSSA adapter to a normal LoRA adapter."},
)
create_new_adapter: bool = field(
default=False,
metadata={"help": "Whether or not to create a new adapter with randomly initialized weight."},
)
@dataclass
class OFTArguments:
additional_target: str | None = field(
default=None,
metadata={
"help": (
"Name(s) of modules apart from LoRA layers to be set as trainable "
"and saved in the final checkpoint. "
"Use commas to separate multiple modules."
)
},
)
module_dropout: float = field(
default=0.0,
metadata={"help": "Dropout rate for the OFT fine-tuning."},
)
oft_rank: int = field(
default=0,
metadata={"help": "The intrinsic dimension for OFT fine-tuning."},
)
oft_block_size: int = field(
default=32,
metadata={"help": "The intrinsic dimension for OFT fine-tuning."},
)
oft_target: str = field(
default="all",
metadata={
"help": (
"Name(s) of target modules to apply OFT. "
"Use commas to separate multiple modules. "
"Use `all` to specify all the linear modules."
)
},
)
create_new_adapter: bool = field(
default=False,
metadata={"help": "Whether or not to create a new adapter with randomly initialized weight."},
)
@dataclass
class RLHFArguments:
pref_beta: float = field(
default=0.1,
metadata={"help": "The beta parameter in the preference loss."},
)
pref_ftx: float = field(
default=0.0,
metadata={"help": "The supervised fine-tuning loss coefficient in DPO training."},
)
pref_bco_weight: float = field(
default=0.0,
metadata={"help": "The Binary Classifier Optimization coefficient in DPO training."},
)
pref_loss: Literal["sigmoid", "hinge", "ipo", "kto_pair", "orpo", "simpo"] = field(
default="sigmoid",
metadata={"help": "The type of DPO loss to use."},
)
dpo_label_smoothing: float = field(
default=0.0,
metadata={"help": "The robust DPO label smoothing parameter in cDPO that should be between 0 and 0.5."},
)
kto_chosen_weight: float = field(
default=1.0,
metadata={"help": "The weight factor of the desirable losses in KTO training."},
)
kto_rejected_weight: float = field(
default=1.0,
metadata={"help": "The weight factor of the undesirable losses in KTO training."},
)
simpo_gamma: float = field(
default=0.5,
metadata={"help": "The target reward margin term in SimPO loss."},
)
ppo_buffer_size: int = field(
default=1,
metadata={"help": "The number of mini-batches to make experience buffer in a PPO optimization step."},
)
ppo_epochs: int = field(
default=4,
metadata={"help": "The number of epochs to perform in a PPO optimization step."},
)
ppo_score_norm: bool = field(
default=False,
metadata={"help": "Use score normalization in PPO training."},
)
ppo_target: float = field(
default=6.0,
metadata={"help": "Target KL value for adaptive KL control in PPO training."},
)
ppo_whiten_rewards: bool = field(
default=False,
metadata={"help": "Whiten the rewards before compute advantages in PPO training."},
)
ref_model: str | None = field(
default=None,
metadata={"help": "Path to the reference model used for the PPO or DPO training."},
)
ref_model_adapters: str | None = field(
default=None,
metadata={"help": "Path to the adapters of the reference model."},
)
ref_model_quantization_bit: int | None = field(
default=None,
metadata={"help": "The number of bits to quantize the reference model."},
)
reward_model: str | None = field(
default=None,
metadata={"help": "Path to the reward model used for the PPO training."},
)
reward_model_adapters: str | None = field(
default=None,
metadata={"help": "Path to the adapters of the reward model."},
)
reward_model_quantization_bit: int | None = field(
default=None,
metadata={"help": "The number of bits to quantize the reward model."},
)
reward_model_type: Literal["lora", "full", "api"] = field(
default="lora",
metadata={"help": "The type of the reward model in PPO training. Lora model only supports lora training."},
)
ld_alpha: float | None = field(
default=None,
metadata={
"help": (
"Alpha parameter from the LD-DPO paper, which controls the weighting of"
" the verbose token log-probabilities in responses."
)
},
)
@dataclass
class GaloreArguments:
use_galore: bool = field(
default=False,
metadata={"help": "Whether or not to use the gradient low-Rank projection (GaLore)."},
)
galore_target: str = field(
default="all",
metadata={
"help": (
"Name(s) of modules to apply GaLore. Use commas to separate multiple modules. "
"Use `all` to specify all the linear modules."
)
},
)
galore_rank: int = field(
default=16,
metadata={"help": "The rank of GaLore gradients."},
)
galore_update_interval: int = field(
default=200,
metadata={"help": "Number of steps to update the GaLore projection."},
)
galore_scale: float = field(
default=2.0,
metadata={"help": "GaLore scaling coefficient."},
)
galore_proj_type: Literal["std", "reverse_std", "right", "left", "full"] = field(
default="std",
metadata={"help": "Type of GaLore projection."},
)
galore_layerwise: bool = field(
default=False,
metadata={"help": "Whether or not to enable layer-wise update to further save memory."},
)
@dataclass
class ApolloArguments:
use_apollo: bool = field(
default=False,
metadata={"help": "Whether or not to use the APOLLO optimizer."},
)
apollo_target: str = field(
default="all",
metadata={
"help": (
"Name(s) of modules to apply APOLLO. Use commas to separate multiple modules. "
"Use `all` to specify all the linear modules."
)
},
)
apollo_rank: int = field(
default=16,
metadata={"help": "The rank of APOLLO gradients."},
)
apollo_update_interval: int = field(
default=200,
metadata={"help": "Number of steps to update the APOLLO projection."},
)
apollo_scale: float = field(
default=32.0,
metadata={"help": "APOLLO scaling coefficient."},
)
apollo_proj: Literal["svd", "random"] = field(
default="random",
metadata={"help": "Type of APOLLO low-rank projection algorithm (svd or random)."},
)
apollo_proj_type: Literal["std", "right", "left"] = field(
default="std",
metadata={"help": "Type of APOLLO projection."},
)
apollo_scale_type: Literal["channel", "tensor"] = field(
default="channel",
metadata={"help": "Type of APOLLO scaling (channel or tensor)."},
)
apollo_layerwise: bool = field(
default=False,
metadata={"help": "Whether or not to enable layer-wise update to further save memory."},
)
apollo_scale_front: bool = field(
default=False,
metadata={"help": "Whether or not to use the norm-growth limiter in front of gradient scaling."},
)
@dataclass
class BAdamArgument:
use_badam: bool = field(
default=False,
metadata={"help": "Whether or not to use the BAdam optimizer."},
)
badam_mode: Literal["layer", "ratio"] = field(
default="layer",
metadata={"help": "Whether to use layer-wise or ratio-wise BAdam optimizer."},
)
badam_start_block: int | None = field(
default=None,
metadata={"help": "The starting block index for layer-wise BAdam."},
)
badam_switch_mode: Literal["ascending", "descending", "random", "fixed"] | None = field(
default="ascending",
metadata={"help": "the strategy of picking block to update for layer-wise BAdam."},
)
badam_switch_interval: int | None = field(
default=50,
metadata={
"help": "Number of steps to update the block for layer-wise BAdam. Use -1 to disable the block update."
},
)
badam_update_ratio: float = field(
default=0.05,
metadata={"help": "The ratio of the update for ratio-wise BAdam."},
)
badam_mask_mode: Literal["adjacent", "scatter"] = field(
default="adjacent",
metadata={
"help": (
"The mode of the mask for BAdam optimizer. "
"`adjacent` means that the trainable parameters are adjacent to each other, "
"`scatter` means that trainable parameters are randomly choosed from the weight."
)
},
)
badam_verbose: int = field(
default=0,
metadata={
"help": (
"The verbosity level of BAdam optimizer. "
"0 for no print, 1 for print the block prefix, 2 for print trainable parameters."
)
},
)
@dataclass
class SwanLabArguments:
use_swanlab: bool = field(
default=False,
metadata={"help": "Whether or not to use the SwanLab (an experiment tracking and visualization tool)."},
)
swanlab_project: str | None = field(
default="llamafactory",
metadata={"help": "The project name in SwanLab."},
)
swanlab_workspace: str | None = field(
default=None,
metadata={"help": "The workspace name in SwanLab."},
)
swanlab_run_name: str | None = field(
default=None,
metadata={"help": "The experiment name in SwanLab."},
)
swanlab_mode: Literal["cloud", "local"] = field(
default="cloud",
metadata={"help": "The mode of SwanLab."},
)
swanlab_api_key: str | None = field(
default=None,
metadata={"help": "The API key for SwanLab."},
)
swanlab_logdir: str | None = field(
default=None,
metadata={"help": "The log directory for SwanLab."},
)
swanlab_lark_webhook_url: str | None = field(
default=None,
metadata={"help": "The Lark(飞书) webhook URL for SwanLab."},
)
swanlab_lark_secret: str | None = field(
default=None,
metadata={"help": "The Lark(飞书) secret for SwanLab."},
)
@dataclass
class FinetuningArguments(
SwanLabArguments,
BAdamArgument,
ApolloArguments,
GaloreArguments,
RLHFArguments,
LoraArguments,
OFTArguments,
FreezeArguments,
):
pure_bf16: bool = field(
default=False,
metadata={"help": "Whether or not to train model in purely bf16 precision (without AMP)."},
)
stage: Literal["pt", "sft", "rm", "ppo", "dpo", "kto"] = field(
default="sft",
metadata={"help": "Which stage will be performed in training."},
)
finetuning_type: Literal["lora", "oft", "freeze", "full"] = field(
default="lora",
metadata={"help": "Which fine-tuning method to use."},
)
use_llama_pro: bool = field(
default=False,
metadata={"help": "Whether or not to make only the parameters in the expanded blocks trainable."},
)
use_adam_mini: bool = field(
default=False,
metadata={"help": "Whether or not to use the Adam-mini optimizer."},
)
use_mca: bool = field(
default=False,
metadata={
"help": (
"Whether or not to use MCA (Megatron Core Adapter) training. "
"Controlled by USE_MCA environment variable."
)
},
)
use_muon: bool = field(
default=False,
metadata={"help": "Whether or not to use the Muon optimizer."},
)
use_dft_loss: bool = field(
default=False,
metadata={"help": "Whether to use the DFT loss."},
)
use_asft_loss: bool = field(
default=False,
metadata={"help": "Whether to use the ASFT loss."},
)
asft_alpha: float = field(
default=0.1,
metadata={"help": "The alpha parameter for ASFT loss to control the power of adaptive weight."},
)
use_eaft_loss: bool = field(
default=False,
metadata={"help": "Whether to use the EAFT loss."},
)
eaft_alpha: float = field(
default=1.0,
metadata={"help": "The alpha parameter for EAFT loss to control the power of adaptive weight."},
)
freeze_vision_tower: bool = field(
default=True,
metadata={"help": "Whether ot not to freeze the vision tower in MLLM training."},
)
freeze_multi_modal_projector: bool = field(
default=True,
metadata={"help": "Whether or not to freeze the multi modal projector in MLLM training."},
)
freeze_language_model: bool = field(
default=False,
metadata={"help": "Whether or not to freeze the language model in MLLM training."},
)
compute_accuracy: bool = field(
default=False,
metadata={"help": "Whether or not to compute the token-level accuracy at evaluation."},
)
disable_shuffling: bool = field(
default=False,
metadata={"help": "Whether or not to disable the shuffling of the training set."},
)
early_stopping_steps: int | None = field(
default=None,
metadata={"help": "Number of steps to stop training if the `metric_for_best_model` does not improve."},
)
plot_loss: bool = field(
default=False,
metadata={"help": "Whether or not to save the training loss curves."},
)
include_effective_tokens_per_second: bool = field(
default=False,
metadata={"help": "Whether or not to compute effective tokens per second."},
)
def __post_init__(self):
def split_arg(arg):
if isinstance(arg, str):
return [item.strip() for item in arg.split(",")]
return arg
self.freeze_trainable_modules: list[str] = split_arg(self.freeze_trainable_modules)
self.freeze_extra_modules: list[str] | None = split_arg(self.freeze_extra_modules)
self.lora_alpha: int = self.lora_alpha or self.lora_rank * 2
self.lora_target: list[str] = split_arg(self.lora_target)
self.oft_target: list[str] = split_arg(self.oft_target)
self.additional_target: list[str] | None = split_arg(self.additional_target)
self.galore_target: list[str] = split_arg(self.galore_target)
self.apollo_target: list[str] = split_arg(self.apollo_target)
self.use_ref_model = self.stage == "dpo" and self.pref_loss not in ["orpo", "simpo"]
assert self.finetuning_type in ["lora", "oft", "freeze", "full"], "Invalid fine-tuning method."
assert self.ref_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."
assert self.reward_model_quantization_bit in [None, 8, 4], "We only accept 4-bit or 8-bit quantization."
if self.stage == "ppo" and self.reward_model is None:
raise ValueError("`reward_model` is necessary for PPO training.")
if self.stage == "ppo" and self.reward_model_type == "lora" and self.finetuning_type != "lora":
raise ValueError("`reward_model_type` cannot be lora for Freeze/Full PPO training.")
if self.stage == "ppo" and self.reward_model_type == "oft" and self.finetuning_type != "oft":
raise ValueError("`reward_model_type` cannot be oft for Freeze/Full PPO training.")
if self.stage == "dpo" and self.pref_loss != "sigmoid" and self.dpo_label_smoothing > 1e-6:
raise ValueError("`dpo_label_smoothing` is only valid for sigmoid loss function.")
if self.use_llama_pro and self.finetuning_type == "full":
raise ValueError("`use_llama_pro` is only valid for Freeze or LoRA training.")
if self.finetuning_type == "lora" and (self.use_galore or self.use_apollo or self.use_badam):
raise ValueError("Cannot use LoRA with GaLore, APOLLO or BAdam together.")
if int(self.use_galore) + int(self.use_apollo) + (self.use_badam) > 1:
raise ValueError("Cannot use GaLore, APOLLO or BAdam together.")
if self.pissa_init and (self.stage in ["ppo", "kto"] or self.use_ref_model):
raise ValueError("Cannot use PiSSA for current training stage.")
if self.finetuning_type != "lora":
if self.loraplus_lr_ratio is not None:
raise ValueError("`loraplus_lr_ratio` is only valid for LoRA training.")
if self.use_rslora:
raise ValueError("`use_rslora` is only valid for LoRA training.")
if self.use_dora:
raise ValueError("`use_dora` is only valid for LoRA training.")
if self.pissa_init:
raise ValueError("`pissa_init` is only valid for LoRA training.")
def to_dict(self) -> dict[str, Any]:
args = asdict(self)
args = {k: f"<{k.upper()}>" if k.endswith("api_key") else v for k, v in args.items()}
return args | --- +++ @@ -18,6 +18,7 @@
@dataclass
class FreezeArguments:
+ r"""Arguments pertaining to the freeze (partial-parameter) training."""
freeze_trainable_layers: int = field(
default=2,
@@ -53,6 +54,7 @@
@dataclass
class LoraArguments:
+ r"""Arguments pertaining to the LoRA training."""
additional_target: str | None = field(
default=None,
@@ -122,6 +124,7 @@
@dataclass
class OFTArguments:
+ r"""Arguments pertaining to the OFT training."""
additional_target: str | None = field(
default=None,
@@ -163,6 +166,7 @@
@dataclass
class RLHFArguments:
+ r"""Arguments pertaining to the PPO, DPO and KTO training."""
pref_beta: float = field(
default=0.1,
@@ -257,6 +261,7 @@
@dataclass
class GaloreArguments:
+ r"""Arguments pertaining to the GaLore algorithm."""
use_galore: bool = field(
default=False,
@@ -295,6 +300,7 @@
@dataclass
class ApolloArguments:
+ r"""Arguments pertaining to the APOLLO algorithm."""
use_apollo: bool = field(
default=False,
@@ -345,6 +351,7 @@
@dataclass
class BAdamArgument:
+ r"""Arguments pertaining to the BAdam optimizer."""
use_badam: bool = field(
default=False,
@@ -444,6 +451,7 @@ OFTArguments,
FreezeArguments,
):
+ r"""Arguments pertaining to which techniques we are going to fine-tuning with."""
pure_bf16: bool = field(
default=False,
@@ -591,4 +599,4 @@ def to_dict(self) -> dict[str, Any]:
args = asdict(self)
args = {k: f"<{k.upper()}>" if k.endswith("api_key") else v for k, v in args.items()}
- return args+ return args
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/hparams/finetuning_args.py |
Provide clean and structured docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import atexit
import json
from collections.abc import AsyncGenerator, AsyncIterator, Sequence
from typing import TYPE_CHECKING, Any, Optional, Union
import requests
from typing_extensions import override
from ..data import get_template_and_fix_tokenizer
from ..extras import logging
from ..extras.constants import AUDIO_PLACEHOLDER, IMAGE_PLACEHOLDER, VIDEO_PLACEHOLDER, EngineName
from ..extras.misc import get_device_count, torch_gc
from ..extras.packages import is_sglang_available
from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments
from ..model import load_config, load_tokenizer
from ..model.model_utils.quantization import QuantizationMethod
from .base_engine import BaseEngine, Response
if is_sglang_available():
from sglang.utils import launch_server_cmd, terminate_process, wait_for_server # type: ignore
if TYPE_CHECKING:
from ..data.mm_plugin import AudioInput, ImageInput, VideoInput
logger = logging.get_logger(__name__)
class SGLangEngine(BaseEngine):
def __init__(
self,
model_args: "ModelArguments",
data_args: "DataArguments",
finetuning_args: "FinetuningArguments",
generating_args: "GeneratingArguments",
) -> None:
self.name = EngineName.SGLANG
self.model_args = model_args
config = load_config(model_args) # may download model from ms hub
if getattr(config, "quantization_config", None): # gptq models should use float16
quantization_config: dict[str, Any] = getattr(config, "quantization_config", None)
quant_method = quantization_config.get("quant_method", "")
if quant_method == QuantizationMethod.GPTQ and model_args.infer_dtype == "auto":
model_args.infer_dtype = "float16"
self.can_generate = finetuning_args.stage == "sft"
tokenizer_module = load_tokenizer(model_args)
self.tokenizer = tokenizer_module["tokenizer"]
self.processor = tokenizer_module["processor"]
self.tokenizer.padding_side = "left"
self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args)
self.template.mm_plugin.expand_mm_tokens = False # for sglang generate
self.generating_args = generating_args.to_dict()
if model_args.adapter_name_or_path is not None:
self.lora_request = True
else:
self.lora_request = False
launch_cmd = [
"python3 -m sglang.launch_server",
f"--model-path {model_args.model_name_or_path}",
f"--dtype {model_args.infer_dtype}",
f"--context-length {model_args.sglang_maxlen}",
f"--mem-fraction-static {model_args.sglang_mem_fraction}",
f"--tp-size {model_args.sglang_tp_size if model_args.sglang_tp_size != -1 else get_device_count() or 1}",
f"--download-dir {model_args.cache_dir}",
"--log-level error",
]
if self.lora_request:
launch_cmd.extend(
[
"--max-loras-per-batch 1",
f"--lora-backend {model_args.sglang_lora_backend}",
f"--lora-paths lora0={model_args.adapter_name_or_path[0]}",
"--disable-radix-cache",
]
)
launch_cmd = " ".join(launch_cmd)
logger.info_rank0(f"Starting SGLang server with command: {launch_cmd}")
try:
torch_gc()
self.server_process, port = launch_server_cmd(launch_cmd)
self.base_url = f"http://localhost:{port}"
atexit.register(self._cleanup_server)
logger.info_rank0(f"Waiting for SGLang server to be ready at {self.base_url}")
wait_for_server(self.base_url, timeout=300)
logger.info_rank0(f"SGLang server initialized successfully at {self.base_url}")
try:
response = requests.get(f"{self.base_url}/get_model_info", timeout=5)
if response.status_code == 200:
model_info = response.json()
logger.info(f"SGLang server model info: {model_info}")
except Exception as e:
logger.debug(f"Note: could not get model info: {str(e)}")
except Exception as e:
logger.error(f"Failed to start SGLang server: {str(e)}")
self._cleanup_server() # make sure to clean up any started process
raise RuntimeError(f"SGLang server initialization failed: {str(e)}.")
def _cleanup_server(self):
if hasattr(self, "server_process") and self.server_process:
try:
logger.info("Terminating SGLang server process")
terminate_process(self.server_process)
logger.info("SGLang server process terminated")
except Exception as e:
logger.warning(f"Error terminating SGLang server: {str(e)}")
async def _generate(
self,
messages: list[dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
images: Optional[list["ImageInput"]] = None,
videos: Optional[list["VideoInput"]] = None,
audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> AsyncIterator[dict[str, Any]]:
if images is not None and not any(IMAGE_PLACEHOLDER in message["content"] for message in messages):
messages[0]["content"] = IMAGE_PLACEHOLDER * len(images) + messages[0]["content"]
if videos is not None and not any(VIDEO_PLACEHOLDER in message["content"] for message in messages):
messages[0]["content"] = VIDEO_PLACEHOLDER * len(videos) + messages[0]["content"]
if audios is not None and not any(AUDIO_PLACEHOLDER in message["content"] for message in messages):
messages[0]["content"] = AUDIO_PLACEHOLDER * len(audios) + messages[0]["content"]
messages = self.template.mm_plugin.process_messages(
messages, images or [], videos or [], audios or [], self.processor
)
paired_messages = messages + [{"role": "assistant", "content": ""}]
prompt_ids, _ = self.template.encode_oneturn(self.tokenizer, paired_messages, system, tools)
prompt_length = len(prompt_ids)
temperature: Optional[float] = input_kwargs.pop("temperature", None)
top_p: Optional[float] = input_kwargs.pop("top_p", None)
top_k: Optional[float] = input_kwargs.pop("top_k", None)
num_return_sequences: int = input_kwargs.pop("num_return_sequences", 1)
repetition_penalty: Optional[float] = input_kwargs.pop("repetition_penalty", None)
skip_special_tokens: Optional[bool] = input_kwargs.pop("skip_special_tokens", None)
max_length: Optional[int] = input_kwargs.pop("max_length", None)
max_new_tokens: Optional[int] = input_kwargs.pop("max_new_tokens", None)
stop: Optional[Union[str, list[str]]] = input_kwargs.pop("stop", None)
if num_return_sequences != 1:
raise NotImplementedError("SGLang only supports n=1.")
if "max_new_tokens" in self.generating_args:
max_tokens = self.generating_args["max_new_tokens"]
elif "max_length" in self.generating_args:
if self.generating_args["max_length"] > prompt_length:
max_tokens = self.generating_args["max_length"] - prompt_length
else:
max_tokens = 1
if max_length:
max_tokens = max_length - prompt_length if max_length > prompt_length else 1
if max_new_tokens:
max_tokens = max_new_tokens
sampling_params = {
"temperature": temperature if temperature is not None else self.generating_args["temperature"],
"top_p": (top_p if top_p is not None else self.generating_args["top_p"]) or 1.0, # top_p must > 0
"top_k": (top_k if top_k is not None else self.generating_args["top_k"]) or -1, # top_k must > 0
"stop": stop,
"stop_token_ids": self.template.get_stop_token_ids(self.tokenizer),
"max_new_tokens": max_tokens,
"repetition_penalty": (
repetition_penalty if repetition_penalty is not None else self.generating_args["repetition_penalty"]
)
or 1.0, # repetition_penalty must > 0
"skip_special_tokens": skip_special_tokens
if skip_special_tokens is not None
else self.generating_args["skip_special_tokens"],
}
def stream_request():
json_data = {
"input_ids": prompt_ids,
"sampling_params": sampling_params,
"stream": True,
}
if self.lora_request:
json_data["lora_request"] = ["lora0"]
response = requests.post(f"{self.base_url}/generate", json=json_data, stream=True)
if response.status_code != 200:
raise RuntimeError(f"SGLang server error: {response.status_code}, {response.text}")
for chunk in response.iter_lines(decode_unicode=False):
chunk = str(chunk.decode("utf-8"))
if chunk == "data: [DONE]":
break
if chunk and chunk.startswith("data:"):
yield json.loads(chunk[5:].strip("\n"))
return await asyncio.to_thread(stream_request)
@override
async def chat(
self,
messages: Sequence[dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
images: Optional[Sequence["ImageInput"]] = None,
videos: Optional[Sequence["VideoInput"]] = None,
audios: Optional[Sequence["AudioInput"]] = None,
**input_kwargs,
) -> list["Response"]:
final_output = None
generator = await self._generate(messages, system, tools, images, videos, audios, **input_kwargs)
for request_output in generator:
final_output = request_output
results = [
Response(
response_text=final_output["text"],
response_length=final_output["meta_info"]["completion_tokens"],
prompt_length=final_output["meta_info"]["prompt_tokens"],
finish_reason="stop" if final_output["meta_info"]["finish_reason"] == "stop" else "length",
)
]
return results
@override
async def stream_chat(
self,
messages: list[dict[str, str]],
system: Optional[str] = None,
tools: Optional[str] = None,
images: Optional[list["ImageInput"]] = None,
videos: Optional[list["VideoInput"]] = None,
audios: Optional[list["AudioInput"]] = None,
**input_kwargs,
) -> AsyncGenerator[str, None]:
generated_text = ""
generator = await self._generate(messages, system, tools, images, videos, audios, **input_kwargs)
for result in generator:
delta_text = result["text"][len(generated_text) :]
generated_text = result["text"]
yield delta_text
@override
async def get_scores(
self,
batch_input: list[str],
**input_kwargs,
) -> list[float]:
raise NotImplementedError("SGLang engine does not support `get_scores`.")
def __del__(self):
self._cleanup_server()
try:
atexit.unregister(self._cleanup_server)
except Exception:
pass | --- +++ @@ -44,6 +44,16 @@
class SGLangEngine(BaseEngine):
+ """Inference engine for SGLang models.
+
+ This class wraps the SGLang engine to provide a consistent interface for text generation
+ that matches LLaMA Factory's requirements. It uses the SGLang HTTP server approach for
+ better interaction and performance. The engine launches a server process and communicates
+ with it via HTTP requests.
+
+ For more details on the SGLang HTTP server approach, see:
+ https://docs.sglang.ai/backend/send_request.html
+ """
def __init__(
self,
@@ -118,6 +128,7 @@ raise RuntimeError(f"SGLang server initialization failed: {str(e)}.")
def _cleanup_server(self):
+ r"""Clean up the server process when the engine is destroyed."""
if hasattr(self, "server_process") and self.server_process:
try:
logger.info("Terminating SGLang server process")
@@ -270,8 +281,9 @@ raise NotImplementedError("SGLang engine does not support `get_scores`.")
def __del__(self):
+ r"""Ensure server is cleaned up when object is deleted."""
self._cleanup_server()
try:
atexit.unregister(self._cleanup_server)
except Exception:
- pass+ pass
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/chat/sglang_engine.py |
Improve documentation using docstrings | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/examples/pytorch/language-modeling/run_clm.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
from pathlib import Path
from typing import Any, Optional
import torch
import transformers
from omegaconf import OmegaConf
from transformers import HfArgumentParser
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.trainer_utils import get_last_checkpoint
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_bf16_gpu_available, is_torch_npu_available
from ..extras import logging
from ..extras.constants import CHECKPOINT_NAMES, EngineName
from ..extras.misc import check_dependencies, check_version, get_current_device, is_env_enabled
from ..extras.packages import is_mcore_adapter_available, is_transformers_version_greater_than
from .data_args import DataArguments
from .evaluation_args import EvaluationArguments
from .finetuning_args import FinetuningArguments
from .generating_args import GeneratingArguments
from .model_args import ModelArguments
from .training_args import RayArguments, TrainingArguments
logger = logging.get_logger(__name__)
check_dependencies()
_TRAIN_ARGS = [ModelArguments, DataArguments, TrainingArguments, FinetuningArguments, GeneratingArguments]
_TRAIN_CLS = tuple[ModelArguments, DataArguments, TrainingArguments, FinetuningArguments, GeneratingArguments]
_INFER_ARGS = [ModelArguments, DataArguments, FinetuningArguments, GeneratingArguments]
_INFER_CLS = tuple[ModelArguments, DataArguments, FinetuningArguments, GeneratingArguments]
_EVAL_ARGS = [ModelArguments, DataArguments, EvaluationArguments, FinetuningArguments]
_EVAL_CLS = tuple[ModelArguments, DataArguments, EvaluationArguments, FinetuningArguments]
if is_mcore_adapter_available() and is_env_enabled("USE_MCA"):
from mcore_adapter import TrainingArguments as McaTrainingArguments
_TRAIN_MCA_ARGS = [ModelArguments, DataArguments, McaTrainingArguments, FinetuningArguments, GeneratingArguments]
_TRAIN_MCA_CLS = tuple[
ModelArguments, DataArguments, McaTrainingArguments, FinetuningArguments, GeneratingArguments
]
else:
_TRAIN_MCA_ARGS = []
_TRAIN_MCA_CLS = tuple()
def read_args(args: dict[str, Any] | list[str] | None = None) -> dict[str, Any] | list[str]:
if args is not None:
return args
if len(sys.argv) > 1 and (sys.argv[1].endswith(".yaml") or sys.argv[1].endswith(".yml")):
override_config = OmegaConf.from_cli(sys.argv[2:])
dict_config = OmegaConf.load(Path(sys.argv[1]).absolute())
return OmegaConf.to_container(OmegaConf.merge(dict_config, override_config))
elif len(sys.argv) > 1 and sys.argv[1].endswith(".json"):
override_config = OmegaConf.from_cli(sys.argv[2:])
dict_config = OmegaConf.create(json.load(Path(sys.argv[1]).absolute()))
return OmegaConf.to_container(OmegaConf.merge(dict_config, override_config))
else:
return sys.argv[1:]
def _parse_args(
parser: "HfArgumentParser", args: dict[str, Any] | list[str] | None = None, allow_extra_keys: bool = False
) -> tuple[Any]:
args = read_args(args)
if isinstance(args, dict):
return parser.parse_dict(args, allow_extra_keys=allow_extra_keys)
(*parsed_args, unknown_args) = parser.parse_args_into_dataclasses(args=args, return_remaining_strings=True)
if unknown_args and not allow_extra_keys:
print(parser.format_help())
print(f"Got unknown args, potentially deprecated arguments: {unknown_args}")
raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {unknown_args}")
return tuple(parsed_args)
def _verify_trackio_args(training_args: "TrainingArguments") -> None:
report_to = training_args.report_to
if not report_to:
return
if isinstance(report_to, str):
report_to = [report_to]
if "trackio" not in report_to:
return
# --- Enforce project (required by Trackio) ---
if not training_args.project:
raise ValueError("`--project` must be specified when using Trackio.")
# --- Validate trackio_space_id format ---
space_id = training_args.trackio_space_id
if space_id:
if space_id != "trackio" and "/" not in space_id:
logger.warning(
f"trackio_space_id '{space_id}' should typically be in format "
"'org/space' for Hugging Face Spaces deployment."
)
# --- Inform about default project usage ---
if training_args.project == "huggingface":
logger.info(
"Using default project name 'huggingface'. "
"Consider setting a custom project name with --project "
"for better organization."
)
# --- Validate hub repo privacy flag ---
if training_args.hub_private_repo:
logger.info("Repository will be created as private on Hugging Face Hub.")
# --- Recommend run_name for experiment clarity ---
if not training_args.run_name:
logger.warning("Consider setting --run_name for better experiment tracking clarity.")
def _set_transformers_logging() -> None:
if os.getenv("LLAMAFACTORY_VERBOSITY", "INFO") in ["DEBUG", "INFO"]:
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
def _set_env_vars() -> None:
if is_torch_npu_available():
# avoid JIT compile on NPU devices, see https://zhuanlan.zhihu.com/p/660875458
torch.npu.set_compile_mode(jit_compile=is_env_enabled("NPU_JIT_COMPILE"))
# avoid use fork method on NPU devices, see https://github.com/hiyouga/LLaMA-Factory/issues/7447
os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
def _verify_model_args(
model_args: "ModelArguments",
data_args: "DataArguments",
finetuning_args: "FinetuningArguments",
) -> None:
if model_args.adapter_name_or_path is not None and finetuning_args.finetuning_type != "lora":
raise ValueError("Adapter is only valid for the LoRA method.")
if model_args.quantization_bit is not None:
if finetuning_args.finetuning_type not in ["lora", "oft"]:
raise ValueError("Quantization is only compatible with the LoRA or OFT method.")
if finetuning_args.pissa_init:
raise ValueError("Please use scripts/pissa_init.py to initialize PiSSA for a quantized model.")
if model_args.resize_vocab:
raise ValueError("Cannot resize embedding layers of a quantized model.")
if model_args.adapter_name_or_path is not None and finetuning_args.create_new_adapter:
raise ValueError("Cannot create new adapter upon a quantized model.")
if model_args.adapter_name_or_path is not None and len(model_args.adapter_name_or_path) != 1:
raise ValueError("Quantized model only accepts a single adapter. Merge them first.")
def _check_extra_dependencies(
model_args: "ModelArguments",
finetuning_args: "FinetuningArguments",
training_args: Optional["TrainingArguments"] = None,
) -> None:
if model_args.use_kt:
check_version("ktransformers", mandatory=True)
if model_args.use_unsloth:
check_version("unsloth", mandatory=True)
if model_args.enable_liger_kernel:
check_version("liger-kernel", mandatory=True)
if model_args.mixture_of_depths is not None:
check_version("mixture-of-depth>=1.1.6", mandatory=True)
if model_args.infer_backend == EngineName.VLLM:
check_version("vllm>=0.4.3,<=0.11.0")
check_version("vllm", mandatory=True)
elif model_args.infer_backend == EngineName.SGLANG:
check_version("sglang>=0.4.5")
check_version("sglang", mandatory=True)
if finetuning_args.use_galore:
check_version("galore_torch", mandatory=True)
if finetuning_args.use_apollo:
check_version("apollo_torch", mandatory=True)
if finetuning_args.use_badam:
check_version("badam>=1.2.1", mandatory=True)
if finetuning_args.use_adam_mini:
check_version("adam-mini", mandatory=True)
if finetuning_args.use_swanlab:
check_version("swanlab", mandatory=True)
if finetuning_args.plot_loss:
check_version("matplotlib", mandatory=True)
if training_args is not None:
if training_args.deepspeed:
check_version("deepspeed", mandatory=True)
if training_args.predict_with_generate:
check_version("jieba", mandatory=True)
check_version("nltk", mandatory=True)
check_version("rouge_chinese", mandatory=True)
def _parse_train_args(args: dict[str, Any] | list[str] | None = None) -> _TRAIN_CLS:
parser = HfArgumentParser(_TRAIN_ARGS)
allow_extra_keys = is_env_enabled("ALLOW_EXTRA_ARGS")
return _parse_args(parser, args, allow_extra_keys=allow_extra_keys)
def _parse_train_mca_args(args: dict[str, Any] | list[str] | None = None) -> _TRAIN_MCA_CLS:
parser = HfArgumentParser(_TRAIN_MCA_ARGS)
allow_extra_keys = is_env_enabled("ALLOW_EXTRA_ARGS")
model_args, data_args, training_args, finetuning_args, generating_args = _parse_args(
parser, args, allow_extra_keys=allow_extra_keys
)
_configure_mca_training_args(training_args, data_args, finetuning_args)
return model_args, data_args, training_args, finetuning_args, generating_args
def _configure_mca_training_args(training_args, data_args, finetuning_args) -> None:
training_args.predict_with_generate = False
training_args.generation_max_length = data_args.cutoff_len
training_args.generation_num_beams = 1
training_args.use_mca = True
finetuning_args.use_mca = True
def _parse_infer_args(args: dict[str, Any] | list[str] | None = None) -> _INFER_CLS:
parser = HfArgumentParser(_INFER_ARGS)
allow_extra_keys = is_env_enabled("ALLOW_EXTRA_ARGS")
return _parse_args(parser, args, allow_extra_keys=allow_extra_keys)
def _parse_eval_args(args: dict[str, Any] | list[str] | None = None) -> _EVAL_CLS:
parser = HfArgumentParser(_EVAL_ARGS)
allow_extra_keys = is_env_enabled("ALLOW_EXTRA_ARGS")
return _parse_args(parser, args, allow_extra_keys=allow_extra_keys)
def get_ray_args(args: dict[str, Any] | list[str] | None = None) -> RayArguments:
parser = HfArgumentParser(RayArguments)
(ray_args,) = _parse_args(parser, args, allow_extra_keys=True)
return ray_args
def get_train_args(args: dict[str, Any] | list[str] | None = None) -> _TRAIN_CLS:
if is_env_enabled("USE_MCA"):
model_args, data_args, training_args, finetuning_args, generating_args = _parse_train_mca_args(args)
else:
model_args, data_args, training_args, finetuning_args, generating_args = _parse_train_args(args)
finetuning_args.use_mca = False
# Setup logging
if training_args.should_log:
_set_transformers_logging()
# Check arguments
if finetuning_args.stage != "sft":
if training_args.predict_with_generate:
raise ValueError("`predict_with_generate` cannot be set as True except SFT.")
if data_args.neat_packing:
raise ValueError("`neat_packing` cannot be set as True except SFT.")
if data_args.train_on_prompt or data_args.mask_history:
raise ValueError("`train_on_prompt` or `mask_history` cannot be set as True except SFT.")
if finetuning_args.stage == "sft" and training_args.do_predict and not training_args.predict_with_generate:
raise ValueError("Please enable `predict_with_generate` to save model predictions.")
if finetuning_args.stage in ["rm", "ppo"] and training_args.load_best_model_at_end:
raise ValueError("RM and PPO stages do not support `load_best_model_at_end`.")
if finetuning_args.stage == "ppo":
if not training_args.do_train:
raise ValueError("PPO training does not support evaluation, use the SFT stage to evaluate models.")
if model_args.shift_attn:
raise ValueError("PPO training is incompatible with S^2-Attn.")
if finetuning_args.reward_model_type == "lora" and model_args.use_kt:
raise ValueError("KTransformers does not support lora reward model.")
if finetuning_args.reward_model_type == "lora" and model_args.use_unsloth:
raise ValueError("Unsloth does not support lora reward model.")
if training_args.report_to and any(
logger not in ("wandb", "tensorboard", "trackio", "none") for logger in training_args.report_to
):
raise ValueError("PPO only accepts wandb, tensorboard, or trackio logger.")
if not model_args.use_kt and training_args.parallel_mode == ParallelMode.NOT_DISTRIBUTED:
raise ValueError("Please launch distributed training with `llamafactory-cli` or `torchrun`.")
if training_args.deepspeed and training_args.parallel_mode != ParallelMode.DISTRIBUTED:
raise ValueError("Please use `FORCE_TORCHRUN=1` to launch DeepSpeed training.")
if training_args.max_steps == -1 and data_args.streaming:
raise ValueError("Please specify `max_steps` in streaming mode.")
if training_args.do_train and data_args.dataset is None:
raise ValueError("Please specify dataset for training.")
if (training_args.do_eval or training_args.do_predict or training_args.predict_with_generate) and (
data_args.eval_dataset is None and data_args.val_size < 1e-6
):
raise ValueError("Please make sure eval_dataset be provided or val_size >1e-6")
if training_args.predict_with_generate:
if is_deepspeed_zero3_enabled():
raise ValueError("`predict_with_generate` is incompatible with DeepSpeed ZeRO-3.")
if finetuning_args.compute_accuracy:
raise ValueError("Cannot use `predict_with_generate` and `compute_accuracy` together.")
if training_args.do_train and model_args.quantization_device_map == "auto":
raise ValueError("Cannot use device map for quantized models in training.")
if finetuning_args.pissa_init and is_deepspeed_zero3_enabled():
raise ValueError("Please use scripts/pissa_init.py to initialize PiSSA in DeepSpeed ZeRO-3.")
if finetuning_args.pure_bf16:
if not (is_torch_bf16_gpu_available() or (is_torch_npu_available() and torch.npu.is_bf16_supported())):
raise ValueError("This device does not support `pure_bf16`.")
if is_deepspeed_zero3_enabled():
raise ValueError("`pure_bf16` is incompatible with DeepSpeed ZeRO-3.")
if training_args.parallel_mode == ParallelMode.DISTRIBUTED:
if finetuning_args.use_galore and finetuning_args.galore_layerwise:
raise ValueError("Distributed training does not support layer-wise GaLore.")
if finetuning_args.use_apollo and finetuning_args.apollo_layerwise:
raise ValueError("Distributed training does not support layer-wise APOLLO.")
if finetuning_args.use_badam:
if finetuning_args.badam_mode == "ratio":
raise ValueError("Radio-based BAdam does not yet support distributed training, use layer-wise BAdam.")
elif not is_deepspeed_zero3_enabled():
raise ValueError("Layer-wise BAdam only supports DeepSpeed ZeRO-3 training.")
if training_args.deepspeed is not None and (finetuning_args.use_galore or finetuning_args.use_apollo):
raise ValueError("GaLore and APOLLO are incompatible with DeepSpeed yet.")
if not finetuning_args.use_mca and training_args.fp8 and model_args.quantization_bit is not None:
raise ValueError("FP8 training is not compatible with quantization. Please disable one of them.")
if model_args.infer_backend != EngineName.HF:
raise ValueError("vLLM/SGLang backend is only available for API, CLI and Web.")
if model_args.use_unsloth and is_deepspeed_zero3_enabled():
raise ValueError("Unsloth is incompatible with DeepSpeed ZeRO-3.")
if model_args.use_kt and is_deepspeed_zero3_enabled():
raise ValueError("KTransformers is incompatible with DeepSpeed ZeRO-3.")
if data_args.neat_packing and is_transformers_version_greater_than("4.53.0"):
raise ValueError("Neat packing is incompatible with transformers>=4.53.0.")
_set_env_vars()
_verify_model_args(model_args, data_args, finetuning_args)
_check_extra_dependencies(model_args, finetuning_args, training_args)
_verify_trackio_args(training_args)
if not finetuning_args.use_mca and training_args.fp8_enable_fsdp_float8_all_gather and not training_args.fp8:
logger.warning_rank0("fp8_enable_fsdp_float8_all_gather requires fp8=True. Setting fp8=True.")
model_args.fp8 = True
if (
training_args.do_train
and finetuning_args.finetuning_type == "lora"
and model_args.quantization_bit is None
and model_args.resize_vocab
and finetuning_args.additional_target is None
):
logger.warning_rank0(
"Remember to add embedding layers to `additional_target` to make the added tokens trainable."
)
if training_args.do_train and model_args.quantization_bit is not None and (not model_args.upcast_layernorm):
logger.warning_rank0("We recommend enable `upcast_layernorm` in quantized training.")
if training_args.do_train and (not training_args.fp16) and (not training_args.bf16):
logger.warning_rank0("We recommend enable mixed precision training.")
if (
training_args.do_train
and (finetuning_args.use_galore or finetuning_args.use_apollo)
and not finetuning_args.pure_bf16
):
logger.warning_rank0(
"Using GaLore or APOLLO with mixed precision training may significantly increases GPU memory usage."
)
if (not training_args.do_train) and model_args.quantization_bit is not None:
logger.warning_rank0("Evaluating model in 4/8-bit mode may cause lower scores.")
if (not training_args.do_train) and finetuning_args.stage == "dpo" and finetuning_args.ref_model is None:
logger.warning_rank0("Specify `ref_model` for computing rewards at evaluation.")
# Post-process training arguments
training_args.generation_max_length = training_args.generation_max_length or data_args.cutoff_len
training_args.generation_num_beams = data_args.eval_num_beams or training_args.generation_num_beams
training_args.remove_unused_columns = False # important for multimodal dataset
if finetuning_args.finetuning_type == "lora":
# https://github.com/huggingface/transformers/blob/v4.50.0/src/transformers/trainer.py#L782
training_args.label_names = training_args.label_names or ["labels"]
if "swanlab" in training_args.report_to and finetuning_args.use_swanlab:
training_args.report_to.remove("swanlab")
if (
training_args.parallel_mode == ParallelMode.DISTRIBUTED
and training_args.ddp_find_unused_parameters is None
and finetuning_args.finetuning_type == "lora"
):
logger.info_rank0("Set `ddp_find_unused_parameters` to False in DDP training since LoRA is enabled.")
training_args.ddp_find_unused_parameters = False
if finetuning_args.stage in ["rm", "ppo"] and finetuning_args.finetuning_type in ["full", "freeze"]:
can_resume_from_checkpoint = False
if training_args.resume_from_checkpoint is not None:
logger.warning_rank0("Cannot resume from checkpoint in current stage.")
training_args.resume_from_checkpoint = None
else:
can_resume_from_checkpoint = True
if (
training_args.resume_from_checkpoint is None
and training_args.do_train
and os.path.isdir(training_args.output_dir)
and not getattr(training_args, "overwrite_output_dir", False) # for mca training args and transformers >= 5.0
and can_resume_from_checkpoint
):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and any(
os.path.isfile(os.path.join(training_args.output_dir, name)) for name in CHECKPOINT_NAMES
):
raise ValueError("Output directory already exists and is not empty. Please set `overwrite_output_dir`.")
if last_checkpoint is not None:
training_args.resume_from_checkpoint = last_checkpoint
logger.info_rank0(f"Resuming training from {training_args.resume_from_checkpoint}.")
logger.info_rank0("Change `output_dir` or use `overwrite_output_dir` to avoid.")
if (
finetuning_args.stage in ["rm", "ppo"]
and finetuning_args.finetuning_type == "lora"
and training_args.resume_from_checkpoint is not None
):
logger.warning_rank0(
f"Add {training_args.resume_from_checkpoint} to `adapter_name_or_path` to resume training from checkpoint."
)
# Post-process model arguments
if training_args.bf16 or finetuning_args.pure_bf16:
model_args.compute_dtype = torch.bfloat16
elif training_args.fp16:
model_args.compute_dtype = torch.float16
model_args.device_map = {"": get_current_device()}
model_args.model_max_length = data_args.cutoff_len
model_args.block_diag_attn = data_args.neat_packing
data_args.packing = data_args.packing if data_args.packing is not None else finetuning_args.stage == "pt"
# Log on each process the small summary
logger.info(
f"Process rank: {training_args.process_index}, "
f"world size: {training_args.world_size}, device: {training_args.device}, "
f"distributed training: {training_args.parallel_mode == ParallelMode.DISTRIBUTED}, "
f"compute dtype: {str(model_args.compute_dtype)}"
)
transformers.set_seed(training_args.seed)
return model_args, data_args, training_args, finetuning_args, generating_args
def get_infer_args(args: dict[str, Any] | list[str] | None = None) -> _INFER_CLS:
model_args, data_args, finetuning_args, generating_args = _parse_infer_args(args)
# Setup logging
_set_transformers_logging()
# Check arguments
if model_args.infer_backend == "vllm":
if finetuning_args.stage != "sft":
raise ValueError("vLLM engine only supports auto-regressive models.")
if model_args.quantization_bit is not None:
raise ValueError("vLLM engine does not support bnb quantization (GPTQ and AWQ are supported).")
if model_args.rope_scaling is not None:
raise ValueError("vLLM engine does not support RoPE scaling.")
if model_args.adapter_name_or_path is not None and len(model_args.adapter_name_or_path) != 1:
raise ValueError("vLLM only accepts a single adapter. Merge them first.")
_set_env_vars()
_verify_model_args(model_args, data_args, finetuning_args)
_check_extra_dependencies(model_args, finetuning_args)
# Post-process model arguments
if model_args.export_dir is not None and model_args.export_device == "cpu":
model_args.device_map = {"": torch.device("cpu")}
if data_args.cutoff_len != DataArguments().cutoff_len: # override cutoff_len if it is not default
model_args.model_max_length = data_args.cutoff_len
else:
model_args.device_map = "auto"
return model_args, data_args, finetuning_args, generating_args
def get_eval_args(args: dict[str, Any] | list[str] | None = None) -> _EVAL_CLS:
model_args, data_args, eval_args, finetuning_args = _parse_eval_args(args)
# Setup logging
_set_transformers_logging()
# Check arguments
if model_args.infer_backend != EngineName.HF:
raise ValueError("vLLM/SGLang backend is only available for API, CLI and Web.")
_set_env_vars()
_verify_model_args(model_args, data_args, finetuning_args)
_check_extra_dependencies(model_args, finetuning_args)
model_args.device_map = "auto"
transformers.set_seed(eval_args.seed)
return model_args, data_args, eval_args, finetuning_args | --- +++ @@ -67,6 +67,7 @@
def read_args(args: dict[str, Any] | list[str] | None = None) -> dict[str, Any] | list[str]:
+ r"""Get arguments from the command line or a config file."""
if args is not None:
return args
@@ -100,6 +101,11 @@
def _verify_trackio_args(training_args: "TrainingArguments") -> None:
+ """Validates Trackio-specific arguments.
+
+ Args:
+ training_args: TrainingArguments instance (not a dictionary)
+ """
report_to = training_args.report_to
if not report_to:
return
@@ -251,6 +257,7 @@
def _configure_mca_training_args(training_args, data_args, finetuning_args) -> None:
+ """Patch training args to avoid args checking errors and sync MCA settings."""
training_args.predict_with_generate = False
training_args.generation_max_length = data_args.cutoff_len
training_args.generation_num_beams = 1
@@ -562,4 +569,4 @@
transformers.set_seed(eval_args.seed)
- return model_args, data_args, eval_args, finetuning_args+ return model_args, data_args, eval_args, finetuning_args
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/hparams/parser.py |
Write docstrings for data processing functions | # Copyright 2025 OpenAccess AI Collective and the LlamaFactory team.
#
# This code is inspired by the OpenAccess AI Collective's axolotl library.
# https://github.com/OpenAccess-AI-Collective/axolotl/blob/main/src/axolotl/monkeypatch/utils.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Literal, Optional
import numpy as np
import torch
import torch.nn.functional as F
from peft import PeftModel
from transformers import DataCollatorForSeq2Seq
from ..extras.constants import AUDIO_PLACEHOLDER, IGNORE_INDEX, IMAGE_PLACEHOLDER
from ..extras.packages import is_pillow_available
if is_pillow_available():
from PIL import Image
if TYPE_CHECKING:
from transformers import ProcessorMixin
from .template import Template
def prepare_4d_attention_mask(attention_mask_with_indices: "torch.Tensor", dtype: "torch.dtype") -> "torch.Tensor":
_, seq_len = attention_mask_with_indices.size()
min_dtype = torch.finfo(dtype).min
zero_tensor = torch.tensor(0, dtype=dtype)
# Create a non-padding mask.
non_padding_mask = (attention_mask_with_indices != 0).unsqueeze(1).unsqueeze(2)
# Create indices for comparison.
indices = attention_mask_with_indices.unsqueeze(1).unsqueeze(2) # [bsz, 1, 1, seq_len]
indices_t = attention_mask_with_indices.unsqueeze(1).unsqueeze(3) # [bsz, 1, seq_len, 1]
# Create a lower triangular mask.
tril_mask = torch.tril(torch.ones((seq_len, seq_len), dtype=torch.bool))
attention_mask_4d = (indices == indices_t) & non_padding_mask & tril_mask
# Invert the attention mask.
attention_mask_4d = torch.where(attention_mask_4d, zero_tensor, min_dtype)
return attention_mask_4d
@dataclass
class MultiModalDataCollatorForSeq2Seq(DataCollatorForSeq2Seq):
template: Optional["Template"] = None
processor: Optional["ProcessorMixin"] = None
def __post_init__(self):
if self.template is None:
raise ValueError("Template is required for MultiModalDataCollator.")
if isinstance(self.model, PeftModel):
self.model = self.model.base_model.model
if self.model is not None and hasattr(self.model, "get_rope_index"): # for qwen2vl mrope
self.get_rope_func = self.model.get_rope_index # transformers < 4.52.0 or qwen2.5 omni
elif self.model is not None and hasattr(self.model, "model") and hasattr(self.model.model, "get_rope_index"):
self.get_rope_func = self.model.model.get_rope_index # transformers >= 4.52.0
else:
self.get_rope_func = None
def __call__(self, features: list[dict[str, Any]]) -> dict[str, "torch.Tensor"]:
batch_images, batch_videos, batch_audios = [], [], []
batch_imglens, batch_vidlens, batch_audlens, batch_input_ids = [], [], [], []
for feature in features:
images = feature.pop("images", None) or []
videos = feature.pop("videos", None) or []
audios = feature.pop("audios", None) or []
batch_images.extend(images)
batch_videos.extend(videos)
batch_audios.extend(audios)
batch_imglens.append(len(images))
batch_vidlens.append(len(videos))
batch_audlens.append(len(audios))
batch_input_ids.append(feature["input_ids"])
fake_input_ids = []
if (
self.template.mm_plugin.image_token is not None and sum(batch_imglens) == 0 and sum(batch_vidlens) == 0
): # avoid process hanging in zero3/fsdp case
fake_messages = [{"role": "user", "content": IMAGE_PLACEHOLDER}]
fake_images = [Image.new("RGB", (64, 64), (255, 255, 255))]
fake_messages = self.template.mm_plugin.process_messages(
fake_messages, fake_images, [], [], self.processor
)
_fake_input_ids = self.tokenizer.encode(fake_messages[0]["content"], add_special_tokens=False)
_fake_input_ids, _ = self.template.mm_plugin.process_token_ids(
_fake_input_ids, None, fake_images, [], [], self.tokenizer, self.processor
)
fake_input_ids.extend(_fake_input_ids)
batch_images = fake_images
batch_imglens[0] = 1
if (
self.template.mm_plugin.audio_token is not None and sum(batch_audlens) == 0
): # avoid process hanging in zero3/fsdp case
fake_messages = [{"role": "user", "content": AUDIO_PLACEHOLDER}]
fake_audios = [np.zeros(1600)]
fake_messages = self.template.mm_plugin.process_messages(
fake_messages, [], [], fake_audios, self.processor
)
_fake_input_ids = self.tokenizer.encode(fake_messages[0]["content"], add_special_tokens=False)
_fake_input_ids, _ = self.template.mm_plugin.process_token_ids(
_fake_input_ids, None, [], [], fake_audios, self.tokenizer, self.processor
)
fake_input_ids.extend(_fake_input_ids)
batch_audios = fake_audios
batch_audlens[0] = 1
if len(fake_input_ids) != 0:
if self.tokenizer.padding_side == "right":
features[0]["input_ids"] = features[0]["input_ids"] + fake_input_ids
features[0]["attention_mask"] = features[0]["attention_mask"] + [0] * len(fake_input_ids)
features[0]["labels"] = features[0]["labels"] + [IGNORE_INDEX] * len(fake_input_ids)
else:
features[0]["input_ids"] = fake_input_ids + features[0]["input_ids"]
features[0]["attention_mask"] = [0] * len(fake_input_ids) + features[0]["attention_mask"]
features[0]["labels"] = [IGNORE_INDEX] * len(fake_input_ids) + features[0]["labels"]
batch_input_ids[0] = features[0]["input_ids"]
mm_inputs = self.template.mm_plugin.get_mm_inputs(
batch_images,
batch_videos,
batch_audios,
batch_imglens,
batch_vidlens,
batch_audlens,
batch_input_ids,
self.processor,
)
if "token_type_ids" in mm_inputs:
token_type_ids = mm_inputs.pop("token_type_ids")
for i, feature in enumerate(features):
feature["token_type_ids"] = token_type_ids[i]
features: dict[str, torch.Tensor] = super().__call__(features)
if self.get_rope_func is not None:
rope_index_kwargs = {
"input_ids": features["input_ids"],
"image_grid_thw": mm_inputs.get("image_grid_thw"),
"video_grid_thw": mm_inputs.get("video_grid_thw"),
"attention_mask": (features["attention_mask"] >= 1).float(),
}
if "mm_token_type_ids" in inspect.signature(self.get_rope_func).parameters:
image_token_id = getattr(self.model.config, "image_token_id", None)
video_token_id = getattr(self.model.config, "video_token_id", None)
if image_token_id is not None or video_token_id is not None:
mm_token_type_ids = torch.zeros_like(features["input_ids"])
if image_token_id is not None:
mm_token_type_ids[features["input_ids"] == image_token_id] = 1
if video_token_id is not None:
mm_token_type_ids[features["input_ids"] == video_token_id] = 2
rope_index_kwargs["mm_token_type_ids"] = mm_token_type_ids
if "second_per_grid_ts" in mm_inputs: # for qwen2vl
rope_index_kwargs["second_per_grid_ts"] = mm_inputs.get("second_per_grid_ts")
elif "video_second_per_grid" in mm_inputs: # for qwen2.5 omni
rope_index_kwargs["second_per_grids"] = mm_inputs.get("video_second_per_grid")
if getattr(self.model.config, "model_type", None) in ["qwen2_5_omni_thinker", "qwen3_omni_moe_thinker"]:
rope_index_kwargs["use_audio_in_video"] = getattr(self.processor, "use_audio_in_video", False)
feature_attention_mask = mm_inputs.get("feature_attention_mask", None)
if feature_attention_mask is not None: # FIXME: need to get video image lengths
audio_feature_lengths = torch.sum(feature_attention_mask, dim=1)
rope_index_kwargs["audio_seqlens"] = audio_feature_lengths # prepare for input
features["position_ids"], rope_deltas = self.get_rope_func(**rope_index_kwargs)
features["rope_deltas"] = rope_deltas - (1 - rope_index_kwargs["attention_mask"]).sum(
dim=-1
).unsqueeze(-1)
else: # for qwen vl
features["position_ids"], features["rope_deltas"] = self.get_rope_func(**rope_index_kwargs)
if (
self.model is not None
and getattr(self.model.config, "model_type", None)
in [
"glm4v",
"glm_ocr",
"Keye",
"qwen2_vl",
"qwen2_5_vl",
"qwen2_5_omni_thinker",
"qwen3_omni_moe_thinker",
"qwen3_5",
"qwen3_vl",
"qwen3_vl_moe",
]
and ("position_ids" not in features or features["position_ids"].dim() != 3)
):
raise ValueError(f"{self.model.config.model_type} requires 3D position ids for mrope.")
if "cross_attention_mask" in mm_inputs: # for mllama inputs when pad_to_multiple_of is enabled
cross_attention_mask = mm_inputs.pop("cross_attention_mask")
seq_len = features["input_ids"].size(1)
orig_len = cross_attention_mask.size(1)
mm_inputs["cross_attention_mask"] = F.pad(cross_attention_mask, (0, 0, 0, 0, 0, seq_len - orig_len))
features.update(mm_inputs)
if "image_bound" in features: # for minicpmv inputs
bsz, seq_length = features["input_ids"].shape
features["position_ids"] = torch.arange(seq_length).long().repeat(bsz, 1)
return {"data": features, "input_ids": features["input_ids"], "labels": features["labels"]}
return features
@dataclass
class SFTDataCollatorWith4DAttentionMask(MultiModalDataCollatorForSeq2Seq):
block_diag_attn: bool = False
attn_implementation: Literal["eager", "sdpa", "flash_attention_2"] = "eager"
compute_dtype: "torch.dtype" = torch.float32
def __call__(self, features: list[dict[str, Any]]) -> dict[str, "torch.Tensor"]:
features = super().__call__(features)
if self.block_diag_attn and self.attn_implementation != "flash_attention_2":
features["attention_mask"] = prepare_4d_attention_mask(features["attention_mask"], self.compute_dtype)
for key, value in features.items(): # cast data dtype for paligemma
if torch.is_tensor(value) and torch.is_floating_point(value):
features[key] = value.to(self.compute_dtype)
return features
@dataclass
class PairwiseDataCollatorWithPadding(MultiModalDataCollatorForSeq2Seq):
def __call__(self, features: list[dict[str, Any]]) -> dict[str, "torch.Tensor"]:
concatenated_features = []
for key in ("chosen", "rejected"):
for feature in features:
target_feature = {
"input_ids": feature[f"{key}_input_ids"],
"attention_mask": feature[f"{key}_attention_mask"],
"labels": feature[f"{key}_labels"],
"images": feature["images"],
"videos": feature["videos"],
"audios": feature["audios"],
}
concatenated_features.append(target_feature)
return super().__call__(concatenated_features)
@dataclass
class KTODataCollatorWithPadding(MultiModalDataCollatorForSeq2Seq):
def __call__(self, features: list[dict[str, Any]]) -> dict[str, "torch.Tensor"]:
target_features = []
kl_features = []
kto_tags = []
for feature in features:
target_feature = {
"input_ids": feature["input_ids"],
"attention_mask": feature["attention_mask"],
"labels": feature["labels"],
"images": feature["images"],
"videos": feature["videos"],
"audios": feature["audios"],
}
kl_feature = {
"input_ids": feature["kl_input_ids"],
"attention_mask": feature["kl_attention_mask"],
"labels": feature["kl_labels"],
"images": feature["images"],
"videos": feature["videos"],
"audios": feature["audios"],
}
target_features.append(target_feature)
kl_features.append(kl_feature)
kto_tags.append(feature["kto_tags"])
batch = super().__call__(target_features)
kl_batch = super().__call__(kl_features)
batch["kl_input_ids"] = kl_batch["input_ids"]
batch["kl_attention_mask"] = kl_batch["attention_mask"]
batch["kl_labels"] = kl_batch["labels"]
if "cross_attention_mask" in kl_batch: # for mllama inputs
batch["kl_cross_attention_mask"] = kl_batch["cross_attention_mask"]
if "token_type_ids" in kl_batch:
batch["kl_token_type_ids"] = kl_batch["token_type_ids"]
batch["kto_tags"] = torch.tensor(kto_tags)
return batch | --- +++ @@ -40,6 +40,31 @@
def prepare_4d_attention_mask(attention_mask_with_indices: "torch.Tensor", dtype: "torch.dtype") -> "torch.Tensor":
+ r"""Expand 2d attention mask to 4d attention mask.
+
+ Expand the attention mask with indices from (batch_size, seq_len) to (batch_size, 1, seq_len, seq_len),
+ handle packed sequences and transforms the mask to lower triangular form to prevent future peeking.
+
+ e.g.
+ ```python
+ # input
+ [[1, 1, 2, 2, 2, 0]]
+ # output
+ [
+ [
+ [
+ [o, x, x, x, x, x],
+ [o, o, x, x, x, x],
+ [x, x, o, x, x, x],
+ [x, x, o, o, x, x],
+ [x, x, o, o, o, x],
+ [x, x, x, x, x, x],
+ ]
+ ]
+ ]
+ ```
+ where `o` equals to `0.0`, `x` equals to `min_dtype`.
+ """
_, seq_len = attention_mask_with_indices.size()
min_dtype = torch.finfo(dtype).min
zero_tensor = torch.tensor(0, dtype=dtype)
@@ -59,6 +84,10 @@
@dataclass
class MultiModalDataCollatorForSeq2Seq(DataCollatorForSeq2Seq):
+ r"""Data collator that supports VLMs.
+
+ Features should contain input_ids, attention_mask, labels, and optionally contain images, videos and audios.
+ """
template: Optional["Template"] = None
processor: Optional["ProcessorMixin"] = None
@@ -227,6 +256,7 @@
@dataclass
class SFTDataCollatorWith4DAttentionMask(MultiModalDataCollatorForSeq2Seq):
+ r"""Data collator for 4d attention mask."""
block_diag_attn: bool = False
attn_implementation: Literal["eager", "sdpa", "flash_attention_2"] = "eager"
@@ -246,8 +276,14 @@
@dataclass
class PairwiseDataCollatorWithPadding(MultiModalDataCollatorForSeq2Seq):
+ r"""Data collator for pairwise data."""
def __call__(self, features: list[dict[str, Any]]) -> dict[str, "torch.Tensor"]:
+ r"""Pad batched data to the longest sequence in the batch.
+
+ We generate 2 * n examples where the first n examples represent chosen examples and
+ the last n examples represent rejected examples.
+ """
concatenated_features = []
for key in ("chosen", "rejected"):
for feature in features:
@@ -266,6 +302,7 @@
@dataclass
class KTODataCollatorWithPadding(MultiModalDataCollatorForSeq2Seq):
+ r"""Data collator for KTO data."""
def __call__(self, features: list[dict[str, Any]]) -> dict[str, "torch.Tensor"]:
target_features = []
@@ -304,4 +341,4 @@ batch["kl_token_type_ids"] = kl_batch["token_type_ids"]
batch["kto_tags"] = torch.tensor(kto_tags)
- return batch+ return batch
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/data/collator.py |
Document all public functions with docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing_extensions import override
from .data_utils import SLOTS
from .tool_utils import FunctionCall, get_tool_utils
@dataclass
class Formatter(ABC):
slots: SLOTS = field(default_factory=list)
tool_format: str | None = None
@abstractmethod
def apply(self, **kwargs) -> SLOTS:
...
def extract(self, content: str) -> str | list["FunctionCall"]:
raise NotImplementedError
@dataclass
class EmptyFormatter(Formatter):
def __post_init__(self):
has_placeholder = False
for slot in filter(lambda s: isinstance(s, str), self.slots):
if re.search(r"\{\{[a-zA-Z_][a-zA-Z0-9_]*\}\}", slot):
has_placeholder = True
if has_placeholder:
raise ValueError("Empty formatter should not contain any placeholder.")
@override
def apply(self, **kwargs) -> SLOTS:
return self.slots
@dataclass
class StringFormatter(Formatter):
def __post_init__(self):
has_placeholder = False
for slot in filter(lambda s: isinstance(s, str), self.slots):
if re.search(r"\{\{[a-zA-Z_][a-zA-Z0-9_]*\}\}", slot):
has_placeholder = True
if not has_placeholder:
raise ValueError("A placeholder is required in the string formatter.")
@override
def apply(self, **kwargs) -> SLOTS:
elements = []
for slot in self.slots:
if isinstance(slot, str):
for name, value in kwargs.items():
if not isinstance(value, str):
raise RuntimeError(f"Expected a string, got {value}")
slot = slot.replace("{{" + name + "}}", value, 1)
elements.append(slot)
elif isinstance(slot, (dict, set)):
elements.append(slot)
else:
raise RuntimeError(f"Input must be string, set[str] or dict[str, str], got {type(slot)}.")
return elements
@dataclass
class FunctionFormatter(StringFormatter):
def __post_init__(self):
super().__post_init__()
self.tool_utils = get_tool_utils(self.tool_format)
@override
def apply(self, **kwargs) -> SLOTS:
content: str = kwargs.pop("content")
thought_words = kwargs.pop("thought_words", None)
tool_call_words = kwargs.pop("tool_call_words", None)
def _parse_functions(json_content: str) -> list["FunctionCall"]:
try:
tool_calls = json.loads(json_content)
if not isinstance(tool_calls, list): # parallel function call
tool_calls = [tool_calls]
return [FunctionCall(tc["name"], json.dumps(tc["arguments"], ensure_ascii=False)) for tc in tool_calls]
except json.JSONDecodeError:
raise RuntimeError(f"Invalid JSON format in function message: {str([content])}.")
tool_call_match = None
if tool_call_words and len(tool_call_words) == 2:
tool_call_regex = re.compile(
rf"{re.escape(tool_call_words[0])}(.*?){re.escape(tool_call_words[1])}", re.DOTALL
)
tool_call_match = re.search(tool_call_regex, content)
if tool_call_match is None:
thought_match = None
if thought_words and len(thought_words) == 2:
regex = re.compile(rf"{re.escape(thought_words[0])}(.*?){re.escape(thought_words[1])}", re.DOTALL)
thought_match = re.search(regex, content)
if thought_match:
json_part = content.replace(thought_match.group(0), "")
else:
json_part = content
functions = _parse_functions(json_part)
function_str = self.tool_utils.function_formatter(functions)
if thought_match:
function_str = thought_match.group(0) + function_str
else:
thought_content = content.replace(tool_call_match.group(0), "")
functions = _parse_functions(tool_call_match.group(1))
function_str = self.tool_utils.function_formatter(functions)
function_str = thought_content + function_str
return super().apply(content=function_str)
@dataclass
class ToolFormatter(Formatter):
def __post_init__(self):
self.tool_utils = get_tool_utils(self.tool_format)
@override
def apply(self, **kwargs) -> SLOTS:
content = kwargs.pop("content")
try:
tools = json.loads(content)
return [self.tool_utils.tool_formatter(tools) if len(tools) != 0 else ""]
except json.JSONDecodeError:
raise RuntimeError(f"Invalid JSON format in tool description: {str([content])}.") # flat string
@override
def extract(self, content: str) -> str | list["FunctionCall"]:
return self.tool_utils.tool_extractor(content) | --- +++ @@ -30,9 +30,14 @@
@abstractmethod
def apply(self, **kwargs) -> SLOTS:
+ r"""Forms a list of slots according to the inputs to encode."""
...
def extract(self, content: str) -> str | list["FunctionCall"]:
+ r"""Extract a list of tuples from the response message if using tools.
+
+ Each tuple consists of function name and function arguments.
+ """
raise NotImplementedError
@@ -151,4 +156,4 @@
@override
def extract(self, content: str) -> str | list["FunctionCall"]:
- return self.tool_utils.tool_extractor(content)+ return self.tool_utils.tool_extractor(content)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/data/formatter.py |
Add docstrings for better understanding | # Copyright 2025 HuggingFace Inc., Daniel Han-Chen & the Unsloth team and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's Transformers and PEFT library,
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/modeling_utils.py
# https://github.com/huggingface/peft/blob/v0.10.0/src/peft/utils/other.py
# and the Unsloth library.
# https://github.com/unslothai/unsloth/blob/July-2024/unsloth/models/_utils.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
from collections.abc import Callable
from functools import WRAPPER_ASSIGNMENTS, partial, wraps
from types import MethodType
from typing import TYPE_CHECKING, Any, Optional, Union
import torch
from ...extras import logging
from ...extras.constants import LAYERNORM_NAMES
if TYPE_CHECKING:
from transformers import PreTrainedModel
from ...hparams import ModelArguments
logger = logging.get_logger(__name__)
def get_unsloth_gradient_checkpointing_func() -> Callable:
class UnslothGradientCheckpointing(torch.autograd.Function):
@staticmethod
@torch.cuda.amp.custom_fwd
def forward(
ctx: "torch.autograd.Function",
forward_function: "torch.Module",
hidden_states: "torch.Tensor",
*args: Union["torch.Tensor", Any],
) -> "torch.Tensor":
saved_hidden_states = hidden_states.to("cpu", non_blocking=True)
with torch.no_grad():
outputs = forward_function(hidden_states, *args)
ctx.save_for_backward(saved_hidden_states)
ctx.forward_function = forward_function
ctx.args = args
return outputs
@staticmethod
@torch.cuda.amp.custom_bwd
def backward(ctx: "torch.autograd.Function", grad_output: "torch.Tensor") -> "torch.Tensor":
(hidden_states,) = ctx.saved_tensors
hidden_states = hidden_states.to("cuda", non_blocking=True).detach()
hidden_states.requires_grad_(True)
with torch.enable_grad():
outputs = ctx.forward_function(hidden_states, *ctx.args)
output = outputs[0] if isinstance(outputs, tuple) else outputs
torch.autograd.backward(output, grad_output)
return (None, hidden_states.grad) + (None,) * len(ctx.args)
return UnslothGradientCheckpointing.apply
def get_custom_gradient_checkpointing_func(gradient_checkpointing_func: Callable) -> Callable:
@wraps(gradient_checkpointing_func, assigned=WRAPPER_ASSIGNMENTS + ("__self__",))
def custom_gradient_checkpointing_func(func: Callable, *args: Union["torch.Tensor", Any], **kwargs):
if isinstance(func, partial):
module: torch.nn.Module = func.func.__self__
else:
module: torch.nn.Module = func.__self__
has_grad = False
if any(param.requires_grad for param in module.parameters()):
has_grad = True
for arg in args:
if torch.is_tensor(arg) and torch.is_floating_point(arg):
arg.requires_grad_(True)
break # assume the first tensor is always the hidden states
if has_grad:
return gradient_checkpointing_func(func, *args, **kwargs)
else:
return func(*args, **kwargs)
return custom_gradient_checkpointing_func
def _gradient_checkpointing_enable(
self: "PreTrainedModel",
gradient_checkpointing_kwargs: Optional[dict[str, Any]] = None,
use_unsloth_gc: bool = False,
) -> None:
from torch.utils.checkpoint import checkpoint
if not self.supports_gradient_checkpointing:
raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")
if gradient_checkpointing_kwargs is None:
gradient_checkpointing_kwargs = {"use_reentrant": True}
if use_unsloth_gc:
gradient_checkpointing_func = get_unsloth_gradient_checkpointing_func()
else:
gradient_checkpointing_func = partial(checkpoint, **gradient_checkpointing_kwargs)
gradient_checkpointing_func = get_custom_gradient_checkpointing_func(gradient_checkpointing_func)
if "value" in inspect.signature(self._set_gradient_checkpointing).parameters: # old GC format
self.apply(partial(self._set_gradient_checkpointing, value=True))
self.enable_input_require_grads()
logger.warning_rank0_once("You are using the old GC format, some features (e.g. BAdam) will be invalid.")
else: # have already enabled input require gradients
self._set_gradient_checkpointing(enable=True, gradient_checkpointing_func=gradient_checkpointing_func)
def _fp32_forward_post_hook(
module: "torch.nn.Module", args: tuple["torch.Tensor"], output: "torch.Tensor"
) -> "torch.Tensor":
return output.to(torch.float32)
def prepare_model_for_training(model: "PreTrainedModel", model_args: "ModelArguments") -> None:
if model_args.upcast_layernorm:
logger.info_rank0("Upcasting layernorm weights in float32.")
for name, param in model.named_parameters():
if param.ndim == 1 and any(ln_name in name for ln_name in LAYERNORM_NAMES):
param.data = param.data.to(torch.float32)
if (
os.environ.get("ACCELERATE_USE_FSDP", "false").lower() == "true"
and int(os.environ.get("FSDP_VERSION", "1")) == 2
):
model_args.use_reentrant_gc = False
logger.warning_rank0("You are using fsdp2, `use_reentrant_gc` has been set to False.")
if not model_args.disable_gradient_checkpointing:
if not getattr(model, "supports_gradient_checkpointing", False):
logger.warning_rank0("Current model does not support gradient checkpointing.")
else:
# use_reentrant=False might increase VRAM usage (have not been empirically verified yet)
# According to: https://github.com/huggingface/transformers/issues/28339
gradient_checkpointing_enable = partial(
_gradient_checkpointing_enable, use_unsloth_gc=model_args.use_unsloth_gc
)
model.gradient_checkpointing_enable = MethodType(gradient_checkpointing_enable, model)
model.gradient_checkpointing_enable(
gradient_checkpointing_kwargs={"use_reentrant": model_args.use_reentrant_gc}
)
setattr(model.config, "use_cache", False) # turn off when gradient checkpointing is enabled
logger.info_rank0("Gradient checkpointing enabled.")
if model_args.upcast_lmhead_output:
output_layer = model.get_output_embeddings()
if isinstance(output_layer, torch.nn.Linear) and output_layer.weight.dtype != torch.float32:
logger.info_rank0("Upcasting lm_head outputs in float32.")
output_layer.register_forward_hook(_fp32_forward_post_hook) | --- +++ @@ -42,6 +42,7 @@
def get_unsloth_gradient_checkpointing_func() -> Callable:
class UnslothGradientCheckpointing(torch.autograd.Function):
+ r"""Saves VRAM by smartly offloading to RAM."""
@staticmethod
@torch.cuda.amp.custom_fwd
@@ -77,6 +78,7 @@
def get_custom_gradient_checkpointing_func(gradient_checkpointing_func: Callable) -> Callable:
+ r"""Only applies gradient checkpointing to trainable layers."""
@wraps(gradient_checkpointing_func, assigned=WRAPPER_ASSIGNMENTS + ("__self__",))
def custom_gradient_checkpointing_func(func: Callable, *args: Union["torch.Tensor", Any], **kwargs):
@@ -106,6 +108,10 @@ gradient_checkpointing_kwargs: Optional[dict[str, Any]] = None,
use_unsloth_gc: bool = False,
) -> None:
+ r"""Activates gradient checkpointing for the current model.
+
+ Modification of the original method to enable gradient checkpointing for block-wise optimizer.
+ """
from torch.utils.checkpoint import checkpoint
if not self.supports_gradient_checkpointing:
@@ -135,6 +141,13 @@
def prepare_model_for_training(model: "PreTrainedModel", model_args: "ModelArguments") -> None:
+ r"""Prepare the model before training.
+
+ Include:
+ (1) cast the layernorm in fp32
+ (2) make output embedding layer require grads
+ (3) add the upcasting of the lm_head in fp32.
+ """
if model_args.upcast_layernorm:
logger.info_rank0("Upcasting layernorm weights in float32.")
for name, param in model.named_parameters():
@@ -168,4 +181,4 @@ output_layer = model.get_output_embeddings()
if isinstance(output_layer, torch.nn.Linear) and output_layer.weight.dtype != torch.float32:
logger.info_rank0("Upcasting lm_head outputs in float32.")
- output_layer.register_forward_hook(_fp32_forward_post_hook)+ output_layer.register_forward_hook(_fp32_forward_post_hook)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/model/model_utils/checkpointing.py |
Write docstrings describing each step | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's TRL library.
# https://github.com/huggingface/trl/blob/v0.8.0/trl/trainer/dpo_trainer.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from collections import defaultdict
from contextlib import nullcontext
from types import MethodType
from typing import TYPE_CHECKING, Literal, Optional, Union
import torch
import torch.nn.functional as F
from transformers import Trainer
from trl import DPOTrainer
from trl.models.utils import prepare_deepspeed, prepare_fsdp
from trl.trainer import disable_dropout_in_model
from typing_extensions import override
from ...extras.constants import IGNORE_INDEX
from ...extras.packages import is_transformers_version_greater_than
from ..callbacks import SaveProcessorCallback
from ..trainer_utils import create_custom_optimizer, create_custom_scheduler, get_batch_logps, nested_detach
if TYPE_CHECKING:
from transformers import PreTrainedModel, ProcessorMixin
from ...hparams import FinetuningArguments
class CustomDPOTrainer(DPOTrainer):
def __init__(
self,
model: Union["PreTrainedModel", torch.nn.Module],
ref_model: Optional[Union["PreTrainedModel", torch.nn.Module]],
finetuning_args: "FinetuningArguments",
processor: Optional["ProcessorMixin"],
disable_dropout: bool = True,
**kwargs,
):
if is_transformers_version_greater_than("4.46"):
kwargs["processing_class"] = kwargs.pop("tokenizer")
if disable_dropout:
disable_dropout_in_model(model)
if ref_model is not None:
disable_dropout_in_model(ref_model)
self.finetuning_args = finetuning_args
self.f_divergence_type = "reverse_kl"
self.reference_free = False
self.use_dpo_data_collator = True # hack to avoid warning
self.generate_during_eval = False # disable at evaluation
self.label_pad_token_id = IGNORE_INDEX
self.padding_value = 0
self.is_encoder_decoder = model.config.is_encoder_decoder
self.precompute_ref_log_probs = False
self._precomputed_train_ref_log_probs = False
self._precomputed_eval_ref_log_probs = False
self._peft_has_been_casted_to_bf16 = False
self.ref_model = ref_model
self._stored_metrics = defaultdict(lambda: defaultdict(list))
# dpo hyperparams
self.beta = finetuning_args.pref_beta
self.loss_type = finetuning_args.pref_loss
self.ftx_gamma = finetuning_args.pref_ftx
self.bco_gemma = finetuning_args.pref_bco_weight
self.label_smoothing = finetuning_args.dpo_label_smoothing
self.simpo_gamma = finetuning_args.simpo_gamma
self.ld_alpha = finetuning_args.ld_alpha
Trainer.__init__(self, model=model, **kwargs)
self.model_accepts_loss_kwargs = False # overwrite trainer's default behavior
if not hasattr(self, "accelerator"):
raise AttributeError("Please update `transformers`.")
warnings.simplefilter("ignore") # remove gc warnings on ref model
if ref_model is not None:
if self.is_deepspeed_enabled:
if not (
getattr(ref_model, "is_loaded_in_8bit", False) or getattr(ref_model, "is_loaded_in_4bit", False)
): # quantized models are already set on the correct device
self.ref_model = prepare_deepspeed(self.ref_model, self.accelerator)
elif self.is_fsdp_enabled:
if self.accelerator.is_fsdp2:
from accelerate.utils.fsdp_utils import fsdp2_prepare_model
self.ref_model = fsdp2_prepare_model(self.accelerator, self.ref_model)
else:
self.ref_model = prepare_fsdp(self.ref_model, self.accelerator)
else:
self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)
self.ref_model.eval()
if processor is not None:
self.add_callback(SaveProcessorCallback(processor))
if finetuning_args.use_badam:
from badam import BAdamCallback, clip_grad_norm_old_version # type: ignore
self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
self.add_callback(BAdamCallback)
if self.bco_gemma >= 1e-6:
from trl.trainer import RunningMoments
self.running = RunningMoments(self.accelerator)
@override
def create_optimizer(self) -> "torch.optim.Optimizer":
if self.optimizer is None:
self.optimizer = create_custom_optimizer(self.model, self.args, self.finetuning_args)
return super().create_optimizer()
@override
def create_scheduler(
self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None
) -> "torch.optim.lr_scheduler.LRScheduler":
create_custom_scheduler(self.args, num_training_steps, optimizer)
return super().create_scheduler(num_training_steps, optimizer)
@override
def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
if self.finetuning_args.disable_shuffling:
return torch.utils.data.SequentialSampler(self.train_dataset)
return super()._get_train_sampler(*args, **kwargs)
@override
def get_batch_samples(self, *args, **kwargs):
return Trainer.get_batch_samples(self, *args, **kwargs)
def odds_ratio_loss(self, chosen_logps: "torch.Tensor", rejected_logps: "torch.Tensor") -> "torch.Tensor":
log_odds = (chosen_logps - rejected_logps) - (
torch.log1p(-torch.exp(chosen_logps)) - torch.log1p(-torch.exp(rejected_logps))
)
sft_loss = -chosen_logps
odds_ratio_loss = -F.logsigmoid(log_odds)
orpo_loss = sft_loss + self.beta * odds_ratio_loss
return orpo_loss
def simpo_loss(self, chosen_logps: "torch.Tensor", rejected_logps: "torch.Tensor") -> "torch.Tensor":
pi_logratios = chosen_logps - rejected_logps
gamma_logratios = self.simpo_gamma / self.beta
logits = pi_logratios - gamma_logratios
simpo_loss = -F.logsigmoid(self.beta * logits)
return simpo_loss
def bco_loss(
self,
chosen_logps: "torch.Tensor",
rejected_logps: "torch.Tensor",
reference_chosen_logps: "torch.Tensor",
reference_rejected_logps: "torch.Tensor",
) -> "torch.Tensor":
chosen_logratios = chosen_logps - reference_chosen_logps
rejected_logratios = rejected_logps - reference_rejected_logps
chosen_rewards = self.beta * chosen_logratios
rejected_rewards = self.beta * rejected_logratios
rewards = torch.cat((chosen_rewards, rejected_rewards), 0).mean().detach()
self.running.update(rewards) # update baseline
delta = self.running.mean
bco_loss = -F.logsigmoid((self.beta * chosen_logratios) - delta) - F.logsigmoid(
-(self.beta * rejected_logratios - delta)
)
return bco_loss
def compute_preference_loss(
self,
policy_chosen_logps: "torch.Tensor",
policy_rejected_logps: "torch.Tensor",
reference_chosen_logps: Optional["torch.Tensor"],
reference_rejected_logps: Optional["torch.Tensor"],
) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]:
if not self.finetuning_args.use_ref_model:
if self.loss_type == "orpo":
losses = self.odds_ratio_loss(policy_chosen_logps, policy_rejected_logps)
elif self.loss_type == "simpo":
losses = self.simpo_loss(policy_chosen_logps, policy_rejected_logps)
else:
raise NotImplementedError(f"Unknown loss type: {self.loss_type}.")
chosen_rewards = self.beta * policy_chosen_logps.to(self.accelerator.device).detach()
rejected_rewards = self.beta * policy_rejected_logps.to(self.accelerator.device).detach()
else:
losses, chosen_rewards, rejected_rewards = self.dpo_loss(
policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps
)
if self.bco_gemma > 1e-6:
bco_losses = self.bco_loss(
policy_chosen_logps, policy_rejected_logps, reference_chosen_logps, reference_rejected_logps
)
losses = (losses + bco_losses * self.bco_gemma) / (1.0 + self.bco_gemma) # re-weight W_p and W_q
return losses, chosen_rewards, rejected_rewards
@override
def concatenated_forward(
self, model: "PreTrainedModel", batch: dict[str, "torch.Tensor"], is_ref_model: bool = False
) -> dict[str, "torch.Tensor"]:
if self.finetuning_args.use_ref_model:
batch = nested_detach(batch, clone=True) # avoid error
labels = batch.pop("labels") # dpo do not need compute loss in forward
all_logits: torch.Tensor = model(**batch, return_dict=True, use_cache=False).logits.to(torch.float32)
all_logps, valid_length = get_batch_logps(
logits=all_logits, labels=labels, ld_alpha=(self.ld_alpha if not is_ref_model else None)
)
if self.loss_type in ["ipo", "orpo", "simpo"]:
all_logps = all_logps / valid_length
batch_size = batch["input_ids"].size(0) // 2
chosen_logps, rejected_logps = all_logps.split(batch_size, dim=0)
chosen_logits, rejected_logits = all_logits.split(batch_size, dim=0)
chosen_length, _ = valid_length.split(batch_size, dim=0)
if self.loss_type in ["ipo", "orpo", "simpo"]:
chosen_logps_avg = chosen_logps
else:
chosen_logps_avg = chosen_logps / chosen_length
return {
"chosen_logps": chosen_logps,
"rejected_logps": rejected_logps,
"chosen_logits": chosen_logits,
"rejected_logits": rejected_logits,
"chosen_logps_avg": chosen_logps_avg,
}
@override
def compute_reference_log_probs(
self, model: "PreTrainedModel", batch: dict[str, "torch.Tensor"]
) -> tuple[Optional["torch.Tensor"], Optional["torch.Tensor"]]:
if not self.finetuning_args.use_ref_model:
return None, None
if self.ref_model is None:
ref_model = model
ref_context = self.accelerator.unwrap_model(model).disable_adapter()
else:
ref_model = self.ref_model
ref_context = nullcontext()
with torch.no_grad(), ref_context:
ref_output = self.concatenated_forward(ref_model, batch, is_ref_model=True)
reference_chosen_logps = ref_output["chosen_logps"]
reference_rejected_logps = ref_output["rejected_logps"]
return reference_chosen_logps, reference_rejected_logps
@override
def get_batch_loss_metrics(
self,
model: "PreTrainedModel",
batch: dict[str, "torch.Tensor"],
train_eval: Literal["train", "eval"] = "train",
) -> tuple["torch.Tensor", dict[str, "torch.Tensor"]]:
metrics = {}
model_output = self.concatenated_forward(model, batch)
policy_chosen_logps = model_output["chosen_logps"]
policy_rejected_logps = model_output["rejected_logps"]
policy_chosen_logits = model_output["chosen_logits"]
policy_rejected_logits = model_output["rejected_logits"]
policy_chosen_logps_avg = model_output["chosen_logps_avg"]
reference_chosen_logps, reference_rejected_logps = self.compute_reference_log_probs(model, batch)
losses, chosen_rewards, rejected_rewards = self.compute_preference_loss(
policy_chosen_logps,
policy_rejected_logps,
reference_chosen_logps,
reference_rejected_logps,
)
sft_loss = -policy_chosen_logps_avg
if self.ftx_gamma > 1e-6:
losses += self.ftx_gamma * sft_loss
prefix = "eval_" if train_eval == "eval" else ""
metrics[f"{prefix}rewards/chosen"] = chosen_rewards.mean().item()
metrics[f"{prefix}rewards/rejected"] = rejected_rewards.mean().item()
metrics[f"{prefix}rewards/accuracies"] = (chosen_rewards > rejected_rewards).float().mean().item()
metrics[f"{prefix}rewards/margins"] = (chosen_rewards - rejected_rewards).mean().item()
metrics[f"{prefix}logps/chosen"] = policy_chosen_logps.mean().item()
metrics[f"{prefix}logps/rejected"] = policy_rejected_logps.mean().item()
metrics[f"{prefix}logits/chosen"] = policy_chosen_logits.mean().item()
metrics[f"{prefix}logits/rejected"] = policy_rejected_logits.mean().item()
if self.loss_type == "orpo":
metrics[f"{prefix}sft_loss"] = sft_loss.mean().item()
metrics[f"{prefix}odds_ratio_loss"] = ((losses - sft_loss) / self.beta).mean().item()
return losses.mean(), metrics
@override
def compute_loss(
self, model: "PreTrainedModel", inputs: dict[str, "torch.Tensor"], return_outputs: bool = False, **kwargs
) -> Union["torch.Tensor", tuple["torch.Tensor", list["torch.Tensor"]]]:
return super().compute_loss(model, inputs, return_outputs)
@override
def log(self, logs: dict[str, float], *args, **kwargs) -> None:
# logs either has "loss" or "eval_loss"
train_eval = "train" if "loss" in logs else "eval"
# Add averaged stored metrics to logs
key_list, metric_list = [], []
for key, metrics in self._stored_metrics[train_eval].items():
key_list.append(key)
metric_list.append(torch.tensor(metrics, dtype=torch.float).to(self.accelerator.device).mean().item())
del self._stored_metrics[train_eval]
if len(metric_list) < 10: # pad to for all reduce
for i in range(10 - len(metric_list)):
key_list.append(f"dummy_{i}")
metric_list.append(0.0)
metric_list = torch.tensor(metric_list, dtype=torch.float).to(self.accelerator.device)
metric_list = self.accelerator.reduce(metric_list, "mean").tolist()
for key, metric in zip(key_list, metric_list): # add remaining items
if not key.startswith("dummy_"):
logs[key] = metric
return Trainer.log(self, logs, *args, **kwargs) | --- +++ @@ -144,9 +144,11 @@
@override
def get_batch_samples(self, *args, **kwargs):
+ r"""Replace the method of DPO Trainer with the one of the standard Trainer."""
return Trainer.get_batch_samples(self, *args, **kwargs)
def odds_ratio_loss(self, chosen_logps: "torch.Tensor", rejected_logps: "torch.Tensor") -> "torch.Tensor":
+ r"""Compute ORPO's odds ratio (OR) loss for batched log probabilities of the policy model."""
log_odds = (chosen_logps - rejected_logps) - (
torch.log1p(-torch.exp(chosen_logps)) - torch.log1p(-torch.exp(rejected_logps))
)
@@ -156,6 +158,7 @@ return orpo_loss
def simpo_loss(self, chosen_logps: "torch.Tensor", rejected_logps: "torch.Tensor") -> "torch.Tensor":
+ r"""Compute SimPO loss for batched log probabilities of the policy model."""
pi_logratios = chosen_logps - rejected_logps
gamma_logratios = self.simpo_gamma / self.beta
logits = pi_logratios - gamma_logratios
@@ -188,6 +191,7 @@ reference_chosen_logps: Optional["torch.Tensor"],
reference_rejected_logps: Optional["torch.Tensor"],
) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]:
+ r"""Compute loss for preference learning."""
if not self.finetuning_args.use_ref_model:
if self.loss_type == "orpo":
losses = self.odds_ratio_loss(policy_chosen_logps, policy_rejected_logps)
@@ -215,6 +219,10 @@ def concatenated_forward(
self, model: "PreTrainedModel", batch: dict[str, "torch.Tensor"], is_ref_model: bool = False
) -> dict[str, "torch.Tensor"]:
+ r"""Compute the sum log probabilities of the labels under given logits if loss_type is not IPO, ORPO or SimPO.
+
+ Otherwise the average log probabilities.
+ """
if self.finetuning_args.use_ref_model:
batch = nested_detach(batch, clone=True) # avoid error
@@ -247,6 +255,7 @@ def compute_reference_log_probs(
self, model: "PreTrainedModel", batch: dict[str, "torch.Tensor"]
) -> tuple[Optional["torch.Tensor"], Optional["torch.Tensor"]]:
+ r"""Compute log probabilities of the reference model."""
if not self.finetuning_args.use_ref_model:
return None, None
@@ -271,6 +280,7 @@ batch: dict[str, "torch.Tensor"],
train_eval: Literal["train", "eval"] = "train",
) -> tuple["torch.Tensor", dict[str, "torch.Tensor"]]:
+ r"""Compute the DPO loss and other metrics for the given batch of inputs for train or test."""
metrics = {}
model_output = self.concatenated_forward(model, batch)
@@ -310,10 +320,12 @@ def compute_loss(
self, model: "PreTrainedModel", inputs: dict[str, "torch.Tensor"], return_outputs: bool = False, **kwargs
) -> Union["torch.Tensor", tuple["torch.Tensor", list["torch.Tensor"]]]:
+ r"""Subclass and override to accept extra kwargs."""
return super().compute_loss(model, inputs, return_outputs)
@override
def log(self, logs: dict[str, float], *args, **kwargs) -> None:
+ r"""Log `logs` on the various objects watching training, including stored metrics."""
# logs either has "loss" or "eval_loss"
train_eval = "train" if "loss" in logs else "eval"
# Add averaged stored metrics to logs
@@ -334,4 +346,4 @@ if not key.startswith("dummy_"):
logs[key] = metric
- return Trainer.log(self, logs, *args, **kwargs)+ return Trainer.log(self, logs, *args, **kwargs)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/train/dpo/trainer.py |
Add docstrings with type hints explained | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections.abc import Iterable
from typing import Any
from huggingface_hub import hf_hub_download
from omegaconf import OmegaConf
from torch.utils.data import Dataset
from ..utils.types import DatasetInfo, HFDataset, Sample
class DataEngine(Dataset):
def __init__(self, dataset_path: str) -> None:
self.path = dataset_path
"""Dataset path."""
self.datasets: dict[str, HFDataset] = {}
"""Dict of (dataset_name, dataset)"""
self.dataset_infos: dict[str, DatasetInfo] = {}
"""Dict of (dataset_name, dataset_info)"""
self.data_index: list[tuple[str, int]] = []
"""List of (dataset_name, sample_index)"""
self.streaming: bool = False
"""Whether dataset is streaming."""
self._get_dataset_info()
self._load_dataset()
self._build_data_index()
def _get_dataset_info(self) -> None:
if self.path.endswith(".yaml") and os.path.isfile(self.path): # local file
self.dataset_infos = OmegaConf.load(self.path)
elif self.path.endswith(".yaml"): # hf hub uri, e.g. llamafactory/v1-sft-demo/dataset_info.yaml
repo_id, filename = os.path.split(self.path)
filepath = hf_hub_download(repo_id=repo_id, filename=filename, repo_type="dataset")
self.dataset_infos = OmegaConf.load(filepath)
elif os.path.exists(self.path): # local file(s)
self.dataset_infos = {"default": {"path": self.path, "source": "local"}}
else: # hf hub dataset, e.g. llamafactory/v1-sft-demo
self.dataset_infos = {"default": {"path": self.path}}
def _load_dataset(self) -> None:
is_streaming = [dataset_info.get("streaming", False) for dataset_info in self.dataset_infos.values()]
self.streaming = any(is_streaming)
if all(is_streaming) != any(is_streaming):
raise ValueError("All datasets must be streaming or non-streaming.")
for dataset_name, dataset_info in self.dataset_infos.items():
split = dataset_info.get("split", "train")
if dataset_info.get("source", "hf_hub") == "hf_hub":
from datasets import load_dataset
self.datasets[dataset_name] = load_dataset(dataset_info["path"], split=split, streaming=self.streaming)
else: # data loader plugin
from ..plugins.data_plugins.loader import DataLoaderPlugin
self.datasets[dataset_name] = DataLoaderPlugin(dataset_info["source"]).load(dataset_info)
def _build_data_index(self) -> None:
for dataset_name, dataset in self.datasets.items():
if self.streaming:
data_index = [(dataset_name, -1) for _ in range(1000)]
else:
data_index = [(dataset_name, sample_index) for sample_index in range(len(dataset))]
size = self.dataset_infos[dataset_name].get("size")
weight = self.dataset_infos[dataset_name].get("weight")
if size or weight:
from ..plugins.data_plugins.loader import adjust_data_index
data_index = adjust_data_index(data_index, size, weight)
self.data_index.extend(data_index)
def _convert_data_sample(self, raw_sample: dict[str, Any], dataset_name: str) -> Sample:
converter = self.dataset_infos[dataset_name].get("converter")
if converter is not None:
from ..plugins.data_plugins.converter import DataConverterPlugin
return {"_dataset_name": dataset_name, **DataConverterPlugin(converter)(raw_sample)}
else:
return {"_dataset_name": dataset_name, **raw_sample}
def __len__(self) -> int:
if self.streaming:
return -1
else:
return len(self.data_index)
def __getitem__(self, index: int | Any) -> Sample | list[Sample]:
if self.streaming:
raise ValueError("Streaming dataset does not support index access.")
if isinstance(index, int):
dataset_name, sample_index = self.data_index[index]
return self._convert_data_sample(self.datasets[dataset_name][sample_index], dataset_name)
else: # data selector plugin
from ..plugins.data_plugins.loader import select_data_sample
selected_index = select_data_sample(self.data_index, index)
if isinstance(selected_index, list):
return [
self._convert_data_sample(self.datasets[dataset_name][sample_index], dataset_name)
for dataset_name, sample_index in selected_index
]
else:
dataset_name, sample_index = selected_index
return self._convert_data_sample(self.datasets[dataset_name][sample_index], dataset_name)
def __iter__(self) -> Iterable[Sample]:
# NOTE: hf iterable dataset uses worker ids while map dataset does not
# NOTE: add worker id and shuffle to the map dataset
# https://github.com/huggingface/datasets/blob/4.0.0/src/datasets/iterable_dataset.py#L2214
raise NotImplementedError()
if __name__ == "__main__":
"""
python -m llamafactory.v1.core.data_engine --train_dataset data/v1_sft_demo.yaml
python -m llamafactory.v1.core.data_engine --train_dataset data/v1_dpo_demo.yaml
"""
from ..config.arg_parser import get_args
_, data_args, *_ = get_args()
data_engine = DataEngine(data_args.train_dataset)
print(data_engine[0]) | --- +++ @@ -12,6 +12,26 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""The definition of data engine.
+
+How to use:
+data_engine = DataEngine(data_args.train_dataset)
+data_engine[i]: Get the sample via index.
+
+Init workflow:
+1. Parse dataset info from arguments.
+2. Load datasets according to dataset info.
+3. Build data index (and reweight samples if necessary).
+
+Get data sample:
+1. Get sample from data index.
+2. Convert sample to standard format.
+3. Return sample.
+
+Note:
+1. The data engine is equivalent to the torch dataset.
+2. The data engine is agnostic to the model used.
+"""
import os
from collections.abc import Iterable
@@ -25,6 +45,11 @@
class DataEngine(Dataset):
+ """Data engine.
+
+ Args:
+ data_args: Data arguments.
+ """
def __init__(self, dataset_path: str) -> None:
self.path = dataset_path
@@ -42,6 +67,7 @@ self._build_data_index()
def _get_dataset_info(self) -> None:
+ """Get dataset info from data arguments."""
if self.path.endswith(".yaml") and os.path.isfile(self.path): # local file
self.dataset_infos = OmegaConf.load(self.path)
elif self.path.endswith(".yaml"): # hf hub uri, e.g. llamafactory/v1-sft-demo/dataset_info.yaml
@@ -54,6 +80,7 @@ self.dataset_infos = {"default": {"path": self.path}}
def _load_dataset(self) -> None:
+ """Load datasets according to dataset info."""
is_streaming = [dataset_info.get("streaming", False) for dataset_info in self.dataset_infos.values()]
self.streaming = any(is_streaming)
if all(is_streaming) != any(is_streaming):
@@ -71,6 +98,7 @@ self.datasets[dataset_name] = DataLoaderPlugin(dataset_info["source"]).load(dataset_info)
def _build_data_index(self) -> None:
+ """Build dataset index."""
for dataset_name, dataset in self.datasets.items():
if self.streaming:
data_index = [(dataset_name, -1) for _ in range(1000)]
@@ -87,6 +115,15 @@ self.data_index.extend(data_index)
def _convert_data_sample(self, raw_sample: dict[str, Any], dataset_name: str) -> Sample:
+ """Convert dataset sample.
+
+ Args:
+ raw_sample (dict[str, Any]): Raw dataset sample.
+ dataset_name (str): Dataset name.
+
+ Returns:
+ Sample: Dataset sample.
+ """
converter = self.dataset_infos[dataset_name].get("converter")
if converter is not None:
from ..plugins.data_plugins.converter import DataConverterPlugin
@@ -96,12 +133,25 @@ return {"_dataset_name": dataset_name, **raw_sample}
def __len__(self) -> int:
+ """Get dataset length.
+
+ Returns:
+ int: Dataset length.
+ """
if self.streaming:
return -1
else:
return len(self.data_index)
def __getitem__(self, index: int | Any) -> Sample | list[Sample]:
+ """Get dataset item.
+
+ Args:
+ index (int): Dataset index.
+
+ Returns:
+ Sample: Dataset item.
+ """
if self.streaming:
raise ValueError("Streaming dataset does not support index access.")
@@ -122,6 +172,11 @@ return self._convert_data_sample(self.datasets[dataset_name][sample_index], dataset_name)
def __iter__(self) -> Iterable[Sample]:
+ """Get dataset iterator.
+
+ Returns:
+ Iterable[Sample]: Dataset iterator.
+ """
# NOTE: hf iterable dataset uses worker ids while map dataset does not
# NOTE: add worker id and shuffle to the map dataset
# https://github.com/huggingface/datasets/blob/4.0.0/src/datasets/iterable_dataset.py#L2214
@@ -138,4 +193,4 @@
_, data_args, *_ = get_args()
data_engine = DataEngine(data_args.train_dataset)
- print(data_engine[0])+ print(data_engine[0])
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/core/data_engine.py |
Add detailed docstrings explaining each function | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the original GaLore's implementation: https://github.com/jiaweizzhao/GaLore
# and the original LoRA+'s implementation: https://github.com/nikhil-ghosh-berkeley/loraplus
# and the original BAdam's implementation: https://github.com/Ledzy/BAdam
# and the HuggingFace's TRL library: https://github.com/huggingface/trl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections.abc import Callable, Mapping
from typing import TYPE_CHECKING, Any, Optional, Union
import torch
import torch.nn.functional as F
from transformers import Trainer
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.modeling_utils import is_fsdp_enabled
from transformers.optimization import get_scheduler
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.trainer_pt_utils import get_parameter_names
from typing_extensions import override
from ..extras import logging
from ..extras.constants import IGNORE_INDEX, SWANLAB_CONFIG
from ..extras.misc import get_device_name
from ..extras.packages import is_apollo_available, is_galore_available, is_ray_available
from ..hparams import FinetuningArguments, ModelArguments
from ..model import find_all_linear_modules, load_model, load_tokenizer, load_valuehead_params
if is_galore_available():
from galore_torch import GaLoreAdafactor, GaLoreAdamW, GaLoreAdamW8bit # type: ignore
if is_apollo_available():
from apollo_torch import APOLLOAdamW # type: ignore
if is_ray_available():
import ray
from ray.util.state import list_nodes
from ray.util.placement_group import PlacementGroup, placement_group
from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy
if TYPE_CHECKING:
from transformers import PreTrainedModel, TrainerCallback, TrainerState
from trl import AutoModelForCausalLMWithValueHead
from ..hparams import DataArguments, TrainingArguments
logger = logging.get_logger(__name__)
class DummyOptimizer(torch.optim.Optimizer):
def __init__(
self, lr: float = 1e-3, optimizer_dict: Optional[dict["torch.nn.Parameter", "torch.optim.Optimizer"]] = None
) -> None:
dummy_tensor = torch.randn(1, 1)
self.optimizer_dict = optimizer_dict
super().__init__([dummy_tensor], {"lr": lr})
@override
def zero_grad(self, set_to_none: bool = True) -> None:
pass
@override
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
pass
def create_modelcard_and_push(
trainer: "Trainer",
model_args: "ModelArguments",
data_args: "DataArguments",
training_args: "TrainingArguments",
finetuning_args: "FinetuningArguments",
) -> None:
kwargs = {
"tasks": "text-generation",
"finetuned_from": model_args.model_name_or_path,
"tags": ["llama-factory", finetuning_args.finetuning_type],
}
if data_args.dataset is not None:
kwargs["dataset"] = data_args.dataset
if model_args.use_unsloth:
kwargs["tags"] = kwargs["tags"] + ["unsloth"]
if model_args.use_kt:
kwargs["tags"] = kwargs["tags"] + ["ktransformers"]
if not training_args.do_train:
pass
elif training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
Trainer.create_model_card(trainer, license="other", **kwargs) # prevent from connecting to hub
def create_ref_model(
model_args: "ModelArguments", finetuning_args: "FinetuningArguments", add_valuehead: bool = False
) -> Optional[Union["PreTrainedModel", "AutoModelForCausalLMWithValueHead"]]:
if finetuning_args.ref_model is not None:
ref_model_args = ModelArguments.copyfrom(
model_args,
model_name_or_path=finetuning_args.ref_model,
adapter_name_or_path=finetuning_args.ref_model_adapters,
quantization_bit=finetuning_args.ref_model_quantization_bit,
)
ref_finetuning_args = FinetuningArguments()
tokenizer = load_tokenizer(ref_model_args)["tokenizer"]
ref_model = load_model(
tokenizer, ref_model_args, ref_finetuning_args, is_trainable=False, add_valuehead=add_valuehead
)
logger.info_rank0(f"Created reference model from {finetuning_args.ref_model}")
else:
if finetuning_args.finetuning_type == "lora":
ref_model = None
else:
ref_model_args = ModelArguments.copyfrom(model_args)
ref_finetuning_args = FinetuningArguments()
tokenizer = load_tokenizer(ref_model_args)["tokenizer"]
ref_model = load_model(
tokenizer, ref_model_args, ref_finetuning_args, is_trainable=False, add_valuehead=add_valuehead
)
logger.info_rank0("Created reference model from the model itself.")
return ref_model
def create_reward_model(
model: "AutoModelForCausalLMWithValueHead", model_args: "ModelArguments", finetuning_args: "FinetuningArguments"
) -> Optional["AutoModelForCausalLMWithValueHead"]:
if finetuning_args.reward_model_type == "api":
assert finetuning_args.reward_model.startswith("http"), "Please provide full url."
logger.info_rank0(f"Use reward server {finetuning_args.reward_model}")
return finetuning_args.reward_model
elif finetuning_args.reward_model_type == "lora":
model.pretrained_model.load_adapter(finetuning_args.reward_model, "reward")
for name, param in model.named_parameters(): # https://github.com/huggingface/peft/issues/1090
if "default" in name:
param.data = param.data.to(torch.float32) # trainable params should in fp32
vhead_params = load_valuehead_params(finetuning_args.reward_model, model_args)
assert vhead_params is not None, "Reward model is not correctly loaded."
model.register_buffer("reward_head_weight", vhead_params["v_head.summary.weight"], persistent=False)
model.register_buffer("reward_head_bias", vhead_params["v_head.summary.bias"], persistent=False)
model.register_buffer(
"default_head_weight", torch.zeros_like(vhead_params["v_head.summary.weight"]), persistent=False
)
model.register_buffer(
"default_head_bias", torch.zeros_like(vhead_params["v_head.summary.bias"]), persistent=False
)
logger.info_rank0(f"Loaded adapter weights of reward model from {finetuning_args.reward_model}")
return None
else:
reward_model_args = ModelArguments.copyfrom(
model_args,
model_name_or_path=finetuning_args.reward_model,
adapter_name_or_path=finetuning_args.reward_model_adapters,
quantization_bit=finetuning_args.reward_model_quantization_bit,
)
reward_finetuning_args = FinetuningArguments()
tokenizer = load_tokenizer(reward_model_args)["tokenizer"]
reward_model = load_model(
tokenizer, reward_model_args, reward_finetuning_args, is_trainable=False, add_valuehead=True
)
logger.info_rank0(f"Loaded full weights of reward model from {finetuning_args.reward_model}")
logger.warning_rank0("Please ensure the ppo model and reward model share SAME tokenizer and vocabulary.")
return reward_model
def _get_decay_parameter_names(model: "PreTrainedModel") -> list[str]:
decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
return decay_parameters
def _create_galore_optimizer(
model: "PreTrainedModel",
training_args: "TrainingArguments",
finetuning_args: "FinetuningArguments",
) -> "torch.optim.Optimizer":
if len(finetuning_args.galore_target) == 1 and finetuning_args.galore_target[0] == "all":
galore_targets = find_all_linear_modules(model, finetuning_args.freeze_vision_tower)
else:
galore_targets = finetuning_args.galore_target
galore_params: list[torch.nn.Parameter] = []
for name, module in model.named_modules():
if isinstance(module, torch.nn.Linear) and any(target in name for target in galore_targets):
for param in module.parameters():
if param.requires_grad and len(param.shape) > 1:
galore_params.append(param)
galore_kwargs = {
"rank": finetuning_args.galore_rank,
"update_proj_gap": finetuning_args.galore_update_interval,
"scale": finetuning_args.galore_scale,
"proj_type": finetuning_args.galore_proj_type,
}
id_galore_params = {id(param) for param in galore_params}
decay_params, nodecay_params = [], [] # they are non-galore parameters
trainable_params: list[torch.nn.Parameter] = [] # galore_params + decay_params + nodecay_params
decay_param_names = _get_decay_parameter_names(model)
for name, param in model.named_parameters():
if param.requires_grad:
trainable_params.append(param)
if id(param) not in id_galore_params:
if name in decay_param_names:
decay_params.append(param)
else:
nodecay_params.append(param)
_, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
if training_args.optim == "adamw_torch":
optim_class = GaLoreAdamW
elif training_args.optim in ["adamw_bnb_8bit", "adamw_8bit", "paged_adamw_8bit"]:
optim_class = GaLoreAdamW8bit
elif training_args.optim == "adafactor":
optim_class = GaLoreAdafactor
else:
raise NotImplementedError(f"Unknown optim: {training_args.optim}.")
if finetuning_args.galore_layerwise:
logger.warning_rank0("The displayed gradient norm will be all zeros in layerwise GaLore.")
if training_args.gradient_accumulation_steps != 1:
raise ValueError("Per-layer GaLore does not support gradient accumulation.")
optimizer_dict: dict[torch.Tensor, torch.optim.Optimizer] = {}
for param in nodecay_params:
param_groups = [dict(params=[param], weight_decay=0.0)]
optimizer_dict[param] = optim_class(param_groups, **optim_kwargs)
for param in decay_params:
param_groups = [dict(params=[param], weight_decay=training_args.weight_decay)]
optimizer_dict[param] = optim_class(param_groups, **optim_kwargs)
for param in galore_params: # galore params have weight decay
param_groups = [dict(params=[param], weight_decay=training_args.weight_decay, **galore_kwargs)]
optimizer_dict[param] = optim_class(param_groups, **optim_kwargs)
def optimizer_hook(param: "torch.nn.Parameter"):
if param.grad is not None:
optimizer_dict[param].step()
optimizer_dict[param].zero_grad()
for param in trainable_params:
param.register_post_accumulate_grad_hook(optimizer_hook)
optimizer = DummyOptimizer(lr=training_args.learning_rate, optimizer_dict=optimizer_dict)
else:
param_groups = [
dict(params=nodecay_params, weight_decay=0.0),
dict(params=decay_params, weight_decay=training_args.weight_decay),
dict(params=galore_params, weight_decay=training_args.weight_decay, **galore_kwargs),
]
optimizer = optim_class(param_groups, **optim_kwargs)
logger.info_rank0(
f"Using GaLore optimizer with args: {galore_kwargs}. "
"It may cause hanging at the start of training, wait patiently."
)
return optimizer
def _create_apollo_optimizer(
model: "PreTrainedModel",
training_args: "TrainingArguments",
finetuning_args: "FinetuningArguments",
) -> "torch.optim.Optimizer":
if len(finetuning_args.apollo_target) == 1 and finetuning_args.apollo_target[0] == "all":
apollo_targets = find_all_linear_modules(model, finetuning_args.freeze_vision_tower)
else:
apollo_targets = finetuning_args.apollo_target
apollo_params: list[torch.nn.Parameter] = []
for name, module in model.named_modules():
if isinstance(module, torch.nn.Linear) and any(target in name for target in apollo_targets):
for param in module.parameters():
if param.requires_grad and len(param.shape) > 1:
apollo_params.append(param)
apollo_kwargs = {
"rank": finetuning_args.apollo_rank,
"proj": finetuning_args.apollo_proj,
"proj_type": finetuning_args.apollo_proj_type,
"update_proj_gap": finetuning_args.apollo_update_interval,
"scale": finetuning_args.apollo_scale,
"scale_type": finetuning_args.apollo_scale_type,
"scale_front": finetuning_args.apollo_scale_front,
}
id_apollo_params = {id(param) for param in apollo_params}
decay_params, nodecay_params = [], [] # they are non-apollo parameters
trainable_params: list[torch.nn.Parameter] = [] # apollo_params + decay_params + nodecay_params
decay_param_names = _get_decay_parameter_names(model)
for name, param in model.named_parameters():
if param.requires_grad:
trainable_params.append(param)
if id(param) not in id_apollo_params:
if name in decay_param_names:
decay_params.append(param)
else:
nodecay_params.append(param)
_, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
if training_args.optim == "adamw_torch":
optim_class = APOLLOAdamW
else:
raise NotImplementedError(f"Unknown optim: {training_args.optim}.")
if finetuning_args.apollo_layerwise:
logger.warning_rank0("The displayed gradient norm will be all zeros in layerwise APOLLO.")
if training_args.gradient_accumulation_steps != 1:
raise ValueError("Per-layer APOLLO does not support gradient accumulation.")
optimizer_dict: dict[torch.Tensor, torch.optim.Optimizer] = {}
for param in nodecay_params:
param_groups = [dict(params=[param], weight_decay=0.0)]
optimizer_dict[param] = optim_class(param_groups, **optim_kwargs)
for param in decay_params:
param_groups = [dict(params=[param], weight_decay=training_args.weight_decay)]
optimizer_dict[param] = optim_class(param_groups, **optim_kwargs)
for param in apollo_params: # apollo params have weight decay
param_groups = [dict(params=[param], weight_decay=training_args.weight_decay, **apollo_kwargs)]
optimizer_dict[param] = optim_class(param_groups, **optim_kwargs)
def optimizer_hook(param: "torch.nn.Parameter"):
if param.grad is not None:
optimizer_dict[param].step()
optimizer_dict[param].zero_grad()
for param in trainable_params:
param.register_post_accumulate_grad_hook(optimizer_hook)
optimizer = DummyOptimizer(lr=training_args.learning_rate, optimizer_dict=optimizer_dict)
else:
param_groups = [
dict(params=nodecay_params, weight_decay=0.0),
dict(params=decay_params, weight_decay=training_args.weight_decay),
dict(params=apollo_params, weight_decay=training_args.weight_decay, **apollo_kwargs),
]
optimizer = optim_class(param_groups, **optim_kwargs)
logger.info_rank0(f"Using APOLLO optimizer with args: {apollo_kwargs}.")
return optimizer
def _create_loraplus_optimizer(
model: "PreTrainedModel",
training_args: "TrainingArguments",
finetuning_args: "FinetuningArguments",
) -> "torch.optim.Optimizer":
default_lr = training_args.learning_rate
loraplus_lr = training_args.learning_rate * finetuning_args.loraplus_lr_ratio
embedding_lr = finetuning_args.loraplus_lr_embedding
decay_param_names = _get_decay_parameter_names(model)
param_dict: dict[str, list[torch.nn.Parameter]] = {
"lora_a": [],
"lora_b": [],
"lora_b_nodecay": [],
"embedding": [],
}
for name, param in model.named_parameters():
if param.requires_grad:
if "lora_embedding_B" in name:
param_dict["embedding"].append(param)
elif "lora_B" in name or param.ndim == 1:
if name in decay_param_names:
param_dict["lora_b"].append(param)
else:
param_dict["lora_b_nodecay"].append(param)
else:
param_dict["lora_a"].append(param)
optim_class, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
param_groups = [
dict(params=param_dict["lora_a"], lr=default_lr, weight_decay=training_args.weight_decay),
dict(params=param_dict["lora_b"], lr=loraplus_lr, weight_decay=training_args.weight_decay),
dict(params=param_dict["lora_b_nodecay"], lr=loraplus_lr, weight_decay=0.0),
dict(params=param_dict["embedding"], lr=embedding_lr, weight_decay=training_args.weight_decay),
]
optimizer = optim_class(param_groups, **optim_kwargs)
logger.info_rank0(f"Using LoRA+ optimizer with loraplus lr ratio {finetuning_args.loraplus_lr_ratio:.2f}.")
return optimizer
def _create_badam_optimizer(
model: "PreTrainedModel",
training_args: "TrainingArguments",
finetuning_args: "FinetuningArguments",
) -> "torch.optim.Optimizer":
decay_params, nodecay_params = [], []
decay_param_names = _get_decay_parameter_names(model)
for name, param in model.named_parameters():
if param.requires_grad:
if name in decay_param_names:
decay_params.append(param)
else:
nodecay_params.append(param)
optim_class, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
param_groups = [
dict(params=nodecay_params, weight_decay=0.0),
dict(params=decay_params, weight_decay=training_args.weight_decay),
]
if finetuning_args.badam_mode == "layer":
from badam import BlockOptimizer # type: ignore
base_optimizer = optim_class(param_groups, **optim_kwargs)
optimizer = BlockOptimizer(
base_optimizer=base_optimizer,
named_parameters_list=list(model.named_parameters()),
block_prefix_list=None,
switch_block_every=finetuning_args.badam_switch_interval,
start_block=finetuning_args.badam_start_block,
switch_mode=finetuning_args.badam_switch_mode,
verbose=finetuning_args.badam_verbose,
ds_zero3_enabled=is_deepspeed_zero3_enabled(),
)
logger.info_rank0(
f"Using BAdam optimizer with layer-wise update, switch mode is {finetuning_args.badam_switch_mode}, "
f"switch block every {finetuning_args.badam_switch_interval} steps, "
f"default start block is {finetuning_args.badam_start_block}"
)
elif finetuning_args.badam_mode == "ratio":
from badam import BlockOptimizerRatio # type: ignore
assert finetuning_args.badam_update_ratio > 1e-6
optimizer = BlockOptimizerRatio(
param_groups=param_groups,
named_parameters_list=list(model.named_parameters()),
update_ratio=finetuning_args.badam_update_ratio,
mask_mode=finetuning_args.badam_mask_mode,
verbose=finetuning_args.badam_verbose,
include_embedding=False,
**optim_kwargs,
)
logger.info_rank0(
f"Using BAdam optimizer with ratio-based update, update ratio is {finetuning_args.badam_update_ratio}, "
f"mask mode is {finetuning_args.badam_mask_mode}"
)
return optimizer
def _create_adam_mini_optimizer(
model: "PreTrainedModel",
training_args: "TrainingArguments",
) -> "torch.optim.Optimizer":
from adam_mini import Adam_mini # type: ignore
hidden_size = getattr(model.config, "hidden_size", None)
num_q_head = getattr(model.config, "num_attention_heads", None)
num_kv_head = getattr(model.config, "num_key_value_heads", None)
optimizer = Adam_mini(
named_parameters=model.named_parameters(),
lr=training_args.learning_rate,
betas=(training_args.adam_beta1, training_args.adam_beta2),
eps=training_args.adam_epsilon,
weight_decay=training_args.weight_decay,
model_sharding=is_fsdp_enabled() or is_deepspeed_zero3_enabled(),
dim=hidden_size,
n_heads=num_q_head,
n_kv_heads=num_kv_head,
)
logger.info_rank0("Using Adam-mini optimizer.")
return optimizer
def _create_muon_optimizer(
model: "PreTrainedModel",
training_args: "TrainingArguments",
) -> "torch.optim.Optimizer":
from ..third_party.muon import Muon
muon_params, adamw_params = [], []
for name, param in model.named_parameters():
if param.requires_grad:
# Use Muon for 2D parameters that aren't embeddings or heads
if param.ndim == 2 and "embed" not in name and "lm_head" not in name:
muon_params.append(param)
else:
adamw_params.append(param)
optimizer = Muon(
lr=training_args.learning_rate,
wd=training_args.weight_decay,
muon_params=muon_params,
adamw_params=adamw_params,
adamw_betas=(training_args.adam_beta1, training_args.adam_beta2),
adamw_eps=training_args.adam_epsilon,
)
logger.info_rank0(
f"Using Muon optimizer with {len(muon_params)} Muon params and {len(adamw_params)} AdamW params."
)
return optimizer
def create_custom_optimizer(
model: "PreTrainedModel",
training_args: "TrainingArguments",
finetuning_args: "FinetuningArguments",
) -> Optional["torch.optim.Optimizer"]:
if finetuning_args.use_galore:
return _create_galore_optimizer(model, training_args, finetuning_args)
if finetuning_args.use_apollo:
return _create_apollo_optimizer(model, training_args, finetuning_args)
if finetuning_args.loraplus_lr_ratio is not None:
return _create_loraplus_optimizer(model, training_args, finetuning_args)
if finetuning_args.use_badam:
return _create_badam_optimizer(model, training_args, finetuning_args)
if finetuning_args.use_adam_mini:
return _create_adam_mini_optimizer(model, training_args)
if finetuning_args.use_muon:
return _create_muon_optimizer(model, training_args)
def create_custom_scheduler(
training_args: "TrainingArguments",
num_training_steps: int,
optimizer: Optional["torch.optim.Optimizer"] = None,
) -> None:
if training_args.lr_scheduler_type == "warmup_stable_decay":
num_warmup_steps = training_args.get_warmup_steps(num_training_steps)
remaining_steps = num_training_steps - num_warmup_steps
num_stable_steps = remaining_steps // 3 # use 1/3 for stable by default
num_decay_steps = remaining_steps - num_stable_steps
scheduler_kwargs = training_args.lr_scheduler_kwargs or {}
default_kwargs = {
"num_stable_steps": num_stable_steps,
"num_decay_steps": num_decay_steps,
}
for key, value in default_kwargs.items():
if key not in scheduler_kwargs:
scheduler_kwargs[key] = value
training_args.lr_scheduler_kwargs = scheduler_kwargs
if optimizer is not None and isinstance(optimizer, DummyOptimizer):
optimizer_dict = optimizer.optimizer_dict
scheduler_dict: dict[torch.nn.Parameter, torch.optim.lr_scheduler.LRScheduler] = {}
for param in optimizer_dict.keys():
scheduler_dict[param] = get_scheduler(
training_args.lr_scheduler_type,
optimizer=optimizer_dict[param],
num_warmup_steps=training_args.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
scheduler_specific_kwargs=training_args.lr_scheduler_kwargs,
)
def scheduler_hook(param: "torch.nn.Parameter"):
scheduler_dict[param].step()
for param in optimizer_dict.keys():
param.register_post_accumulate_grad_hook(scheduler_hook)
def get_batch_logps(
logits: "torch.Tensor",
labels: "torch.Tensor",
label_pad_token_id: int = IGNORE_INDEX,
ld_alpha: Optional[float] = None,
) -> tuple["torch.Tensor", "torch.Tensor"]:
if logits.shape[:-1] != labels.shape:
raise ValueError("Logits (batchsize x seqlen) and labels must have the same shape.")
labels = labels[:, 1:].clone()
logits = logits[:, :-1, :]
loss_mask = labels != label_pad_token_id
labels[labels == label_pad_token_id] = 0 # dummy token
per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)
valid_length = loss_mask.sum(-1)
if ld_alpha is not None:
num_examples = labels.shape[0] // 2
chosen_lengths = valid_length[:num_examples]
rejected_lengths = valid_length[num_examples:]
min_lengths = torch.min(chosen_lengths, rejected_lengths)
start_positions = torch.argmax(loss_mask.int(), dim=1)
public_lengths = start_positions + torch.cat([min_lengths, min_lengths], dim=0)
seq_len = labels.shape[-1]
position_ids = torch.arange(seq_len, device=per_token_logps.device).expand_as(per_token_logps)
ld_mask = position_ids < public_lengths.unsqueeze(1)
front_mask = (ld_mask * loss_mask).float()
rear_mask = (~ld_mask * loss_mask).float()
front_logps = (per_token_logps * front_mask).sum(-1)
rear_logps = (per_token_logps * rear_mask).sum(-1)
logps = front_logps + ld_alpha * rear_logps
else:
logps = (per_token_logps * loss_mask).sum(-1)
return logps, valid_length
def dft_loss_func(
outputs: "torch.Tensor", labels: "torch.Tensor", num_items_in_batch: Optional["torch.Tensor"] = None
):
logits = outputs.get("logits")
if logits is None:
return outputs.get("loss", torch.tensor(0.0))
logits = logits.float()
vocab_size = logits.size(-1)
labels = torch.nn.functional.pad(labels, (0, 1), value=-100)
shift_labels = labels[..., 1:].contiguous()
logits = logits.view(-1, vocab_size)
shift_labels = shift_labels.view(-1)
shift_labels = shift_labels.to(logits.device)
loss = _dft_cross_entropy(logits, shift_labels, num_items_in_batch)
return loss
def _dft_cross_entropy(
source: "torch.Tensor",
target: "torch.Tensor",
num_items_in_batch: Optional["torch.Tensor"] = None,
ignore_index: int = -100,
) -> "torch.Tensor":
per_token_loss = torch.nn.functional.cross_entropy(source, target, ignore_index=ignore_index, reduction="none")
valid_mask = target != ignore_index
if not valid_mask.any():
return torch.tensor(0.0, device=source.device, dtype=source.dtype)
valid_losses = per_token_loss[valid_mask]
with torch.no_grad():
target_probs = torch.exp(-valid_losses)
weighted_losses = valid_losses * target_probs
if num_items_in_batch is not None:
total_loss = weighted_losses.sum()
if torch.is_tensor(num_items_in_batch):
num_items_in_batch = num_items_in_batch.to(total_loss.device)
loss = total_loss / num_items_in_batch
else:
loss = weighted_losses.mean()
return loss
def asft_loss_func(
outputs,
labels: torch.Tensor,
ref_logits: torch.Tensor,
asft_alpha: float = 0.1,
ignore_index: int = -100,
) -> torch.Tensor:
logits = outputs.get("logits")
if logits is None:
return outputs.get("loss", torch.tensor(0.0))
logits = logits.float()
# shift for causal LM
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
shift_ref_logits = ref_logits[..., :-1, :].contiguous()
vocab_size = shift_logits.size(-1)
# flatten
shift_logits = shift_logits.view(-1, vocab_size)
shift_ref_logits = shift_ref_logits.view(-1, vocab_size)
shift_labels = shift_labels.view(-1).to(shift_logits.device)
return _asft_cross_entropy(
policy_logits=shift_logits,
policy_labels=shift_labels,
ref_logits=shift_ref_logits,
asft_alpha=asft_alpha,
ignore_index=ignore_index,
)
def _asft_cross_entropy(
policy_logits: torch.Tensor,
policy_labels: torch.Tensor,
ref_logits: torch.Tensor,
asft_alpha: float = 0.1,
ignore_index: int = -100,
) -> torch.Tensor:
dft_loss = _dft_cross_entropy(
policy_logits,
policy_labels,
ignore_index=ignore_index,
)
kl_loss = _kl_divergence(
policy_logits,
ref_logits,
policy_labels,
ignore_index=ignore_index,
)
return dft_loss + asft_alpha * kl_loss
def _kl_divergence(
policy_logits: torch.Tensor,
ref_logits: torch.Tensor,
labels: torch.Tensor,
ignore_index: int = -100,
) -> torch.Tensor:
# log p(y|x)
log_p = F.log_softmax(policy_logits, dim=-1)
# q(y|x)
q = F.softmax(ref_logits, dim=-1)
# token-wise KL
kl = F.kl_div(
log_p,
q,
reduction="none",
).sum(dim=-1) # [N]
# mask padding tokens
mask = (labels != ignore_index).float()
return (kl * mask).sum() / mask.sum()
def eaft_loss_func(
outputs: "torch.Tensor",
labels: "torch.Tensor",
num_items_in_batch: Optional["torch.Tensor"] = None,
alpha: float = 1.0,
) -> "torch.Tensor":
logits = outputs.get("logits")
if logits is None:
return outputs.get("loss", torch.tensor(0.0))
logits = logits.float()
vocab_size = logits.size(-1)
labels = torch.nn.functional.pad(labels, (0, 1), value=-100)
shift_labels = labels[..., 1:].contiguous()
logits = logits.view(-1, vocab_size)
shift_labels = shift_labels.view(-1)
shift_labels = shift_labels.to(logits.device)
loss = _eaft_cross_entropy(logits, shift_labels, num_items_in_batch, alpha)
return loss
def _eaft_cross_entropy(
source: "torch.Tensor",
target: "torch.Tensor",
num_items_in_batch: Optional["torch.Tensor"] = None,
alpha: float = 1.0,
ignore_index: int = -100,
) -> "torch.Tensor":
per_token_loss = torch.nn.functional.cross_entropy(source, target, ignore_index=ignore_index, reduction="none")
valid_mask = target != ignore_index
if not valid_mask.any():
return torch.tensor(0.0, device=source.device, dtype=source.dtype)
valid_losses = per_token_loss[valid_mask]
with torch.no_grad():
source_detached = source[valid_mask].detach()
topk_val, _ = torch.topk(source_detached, k=20, dim=-1)
logsumexp_topk = torch.logsumexp(topk_val, dim=-1, keepdim=True)
log_probs_topk = topk_val - logsumexp_topk
probs_topk = torch.exp(log_probs_topk)
entropy_approx = -(probs_topk * log_probs_topk).sum(dim=-1)
entropy_term = entropy_approx / 3.0
adaptive_weight = torch.pow(entropy_term, alpha)
weighted_losses = valid_losses * adaptive_weight
if num_items_in_batch is not None:
total_loss = weighted_losses.sum()
if torch.is_tensor(num_items_in_batch):
num_items_in_batch = num_items_in_batch.to(total_loss.device)
loss = total_loss / num_items_in_batch
else:
loss = weighted_losses.mean()
return loss
def nested_detach(
tensors: Union["torch.Tensor", list["torch.Tensor"], tuple["torch.Tensor"], dict[str, "torch.Tensor"]],
clone: bool = False,
):
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_detach(t, clone=clone) for t in tensors)
elif isinstance(tensors, Mapping):
return type(tensors)({k: nested_detach(t, clone=clone) for k, t in tensors.items()})
if isinstance(tensors, torch.Tensor):
if clone:
return tensors.detach().clone()
else:
return tensors.detach()
else:
return tensors
def get_swanlab_callback(finetuning_args: "FinetuningArguments") -> "TrainerCallback":
import swanlab # type: ignore
from swanlab.integration.transformers import SwanLabCallback # type: ignore
if finetuning_args.swanlab_api_key is not None:
swanlab.login(api_key=finetuning_args.swanlab_api_key)
if finetuning_args.swanlab_lark_webhook_url is not None:
from swanlab.plugin.notification import LarkCallback # type: ignore
lark_callback = LarkCallback(
webhook_url=finetuning_args.swanlab_lark_webhook_url,
secret=finetuning_args.swanlab_lark_secret,
)
swanlab.register_callbacks([lark_callback])
class SwanLabCallbackExtension(SwanLabCallback):
def setup(self, args: "TrainingArguments", state: "TrainerState", model: "PreTrainedModel", **kwargs):
if not state.is_world_process_zero:
return
super().setup(args, state, model, **kwargs)
try:
if hasattr(self, "_swanlab"):
swanlab_public_config = self._swanlab.get_run().public.json()
else: # swanlab <= 0.4.9
swanlab_public_config = self._experiment.get_run().public.json()
except Exception:
swanlab_public_config = {}
with open(os.path.join(args.output_dir, SWANLAB_CONFIG), "w") as f:
f.write(json.dumps(swanlab_public_config, indent=2))
swanlab_callback = SwanLabCallbackExtension(
project=finetuning_args.swanlab_project,
workspace=finetuning_args.swanlab_workspace,
experiment_name=finetuning_args.swanlab_run_name,
mode=finetuning_args.swanlab_mode,
config={"Framework": "🦙LlamaFactory"},
logdir=finetuning_args.swanlab_logdir,
tags=["🦙LlamaFactory"],
)
return swanlab_callback
def get_placement_group(num_workers: int) -> tuple["PlacementGroup", dict[str, int]]:
bundle = {"CPU": 10}
device_name = get_device_name().upper()
if device_name != "CPU":
bundle[device_name] = 1
bundles = [bundle for _ in range(num_workers)]
pg = placement_group(bundles, strategy="PACK")
return pg, bundle
def get_ray_remote_config_for_worker(
placement_group: "PlacementGroup",
bundle_idx: int,
rank: int,
world_size: int,
master_addr: str,
master_port: str,
env: dict[str, str] = None,
) -> dict[str, Any]:
env_vars = {
"RANK": str(rank),
"WORLD_SIZE": str(world_size),
"MASTER_ADDR": master_addr,
"MASTER_PORT": master_port,
"TORCHELASTIC_USE_AGENT_STORE": "False",
}
env.update(env_vars)
remote_config = {
"scheduling_strategy": PlacementGroupSchedulingStrategy(
placement_group=placement_group,
placement_group_bundle_index=bundle_idx,
),
"runtime_env": {"env_vars": env},
"num_cpus": 10,
}
device_name = get_device_name()
if device_name == "gpu":
remote_config["num_gpus"] = 1
elif device_name == "npu":
remote_config["resources"] = {"NPU": 1}
return remote_config
def get_ray_head_node_ip() -> str:
head_ip = next(node["node_ip"] for node in list_nodes() if node.get("is_head_node", False))
return head_ip
def sort_placement_group_by_node_ip(placement_group: "PlacementGroup", master_addr: str = None) -> list[int]:
@ray.remote
def _get_node_ip():
return ray.util.get_node_ip_address().strip("[]")
tasks = []
for bundle_idx in range(placement_group.bundle_count):
task = _get_node_ip.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=placement_group,
placement_group_bundle_index=bundle_idx,
),
).remote()
tasks.append(task)
bundle_ips = ray.get(tasks)
bundle_node_ip_list = list(enumerate(bundle_ips))
sorted_bundle_node_ip_list = sorted(bundle_node_ip_list, key=lambda x: x[1])
sorted_bundle_indices = [item[0] for item in sorted_bundle_node_ip_list]
if master_addr is not None:
preferred_indices = [idx for idx, ip in bundle_node_ip_list if ip == master_addr]
if preferred_indices:
remaining = [i for i in sorted_bundle_indices if i not in preferred_indices]
sorted_bundle_indices = preferred_indices + remaining
return sorted_bundle_indices | --- +++ @@ -66,6 +66,7 @@
class DummyOptimizer(torch.optim.Optimizer):
+ r"""A dummy optimizer used for the GaLore or APOLLO algorithm."""
def __init__(
self, lr: float = 1e-3, optimizer_dict: Optional[dict["torch.nn.Parameter", "torch.optim.Optimizer"]] = None
@@ -115,6 +116,10 @@ def create_ref_model(
model_args: "ModelArguments", finetuning_args: "FinetuningArguments", add_valuehead: bool = False
) -> Optional[Union["PreTrainedModel", "AutoModelForCausalLMWithValueHead"]]:
+ r"""Create reference model for PPO/DPO training. Evaluation mode is not supported.
+
+ The valuehead parameter is randomly initialized since it is useless for PPO training.
+ """
if finetuning_args.ref_model is not None:
ref_model_args = ModelArguments.copyfrom(
model_args,
@@ -146,6 +151,7 @@ def create_reward_model(
model: "AutoModelForCausalLMWithValueHead", model_args: "ModelArguments", finetuning_args: "FinetuningArguments"
) -> Optional["AutoModelForCausalLMWithValueHead"]:
+ r"""Create reward model for PPO training."""
if finetuning_args.reward_model_type == "api":
assert finetuning_args.reward_model.startswith("http"), "Please provide full url."
logger.info_rank0(f"Use reward server {finetuning_args.reward_model}")
@@ -185,6 +191,7 @@
def _get_decay_parameter_names(model: "PreTrainedModel") -> list[str]:
+ r"""Return a list of names of parameters with weight decay. (weights in non-layernorm layers)."""
decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
return decay_parameters
@@ -588,6 +595,13 @@ label_pad_token_id: int = IGNORE_INDEX,
ld_alpha: Optional[float] = None,
) -> tuple["torch.Tensor", "torch.Tensor"]:
+ r"""Compute the log probabilities of the given labels under the given logits.
+
+ Returns:
+ logps: A tensor of shape (batch_size,) containing the sum of log probabilities.
+ valid_length: A tensor of shape (batch_size,) containing the number of non-masked tokens.
+
+ """
if logits.shape[:-1] != labels.shape:
raise ValueError("Logits (batchsize x seqlen) and labels must have the same shape.")
@@ -816,6 +830,7 @@ tensors: Union["torch.Tensor", list["torch.Tensor"], tuple["torch.Tensor"], dict[str, "torch.Tensor"]],
clone: bool = False,
):
+ r"""Detach `tensors` (even if it's a nested list/tuple/dict of tensors)."""
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_detach(t, clone=clone) for t in tensors)
elif isinstance(tensors, Mapping):
@@ -831,6 +846,7 @@
def get_swanlab_callback(finetuning_args: "FinetuningArguments") -> "TrainerCallback":
+ r"""Get the callback for logging to SwanLab."""
import swanlab # type: ignore
from swanlab.integration.transformers import SwanLabCallback # type: ignore
@@ -876,6 +892,7 @@
def get_placement_group(num_workers: int) -> tuple["PlacementGroup", dict[str, int]]:
+ r"""Get the Ray placement group for distributed training."""
bundle = {"CPU": 10}
device_name = get_device_name().upper()
if device_name != "CPU":
@@ -895,6 +912,7 @@ master_port: str,
env: dict[str, str] = None,
) -> dict[str, Any]:
+ r"""Get the remote config for a Ray worker."""
env_vars = {
"RANK": str(rank),
"WORLD_SIZE": str(world_size),
@@ -923,11 +941,13 @@
def get_ray_head_node_ip() -> str:
+ r"""Get the IP address of the Ray head node."""
head_ip = next(node["node_ip"] for node in list_nodes() if node.get("is_head_node", False))
return head_ip
def sort_placement_group_by_node_ip(placement_group: "PlacementGroup", master_addr: str = None) -> list[int]:
+ r"""Sort the placement group bundles by their node IP addresses."""
@ray.remote
def _get_node_ip():
@@ -955,4 +975,4 @@ remaining = [i for i in sorted_bundle_indices if i not in preferred_indices]
sorted_bundle_indices = preferred_indices + remaining
- return sorted_bundle_indices+ return sorted_bundle_indices
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/train/trainer_utils.py |
Insert docstrings into my code | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's TRL library.
# https://github.com/huggingface/trl/blob/v0.8.0/trl/trainer/ppo_trainer.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import sys
import warnings
from types import MethodType
from typing import TYPE_CHECKING, Any, Optional
import torch
from accelerate.utils import DistributedDataParallelKwargs
from tqdm import tqdm
from transformers import GenerationConfig, Trainer, TrainerControl, TrainerState
from transformers.optimization import get_scheduler
from transformers.trainer import DEFAULT_CALLBACKS
from transformers.trainer_callback import CallbackHandler
from transformers.trainer_pt_utils import remove_dummy_checkpoint
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR
from transformers.utils import SAFE_WEIGHTS_NAME, WEIGHTS_NAME
from trl import PPOConfig, PPOTrainer
from trl import __version__ as trl_version
from trl.models.utils import unwrap_model_for_generation
from typing_extensions import override
from ...extras import logging
from ...extras.misc import AverageMeter, count_parameters, get_current_device, get_logits_processor, torch_gc
from ..callbacks import FixValueHeadModelCallback, SaveProcessorCallback
from ..trainer_utils import create_custom_optimizer, create_custom_scheduler
from .ppo_utils import dump_layernorm, get_rewards_from_server, replace_model, restore_layernorm
if TYPE_CHECKING:
from datasets import Dataset
from transformers import (
DataCollatorWithPadding,
PreTrainedTokenizer,
ProcessorMixin,
Seq2SeqTrainingArguments,
TrainerCallback,
)
from trl import AutoModelForCausalLMWithValueHead
from ...hparams import FinetuningArguments, GeneratingArguments, ModelArguments
logger = logging.get_logger(__name__)
class CustomPPOTrainer(PPOTrainer, Trainer):
def __init__(
self,
model_args: "ModelArguments",
training_args: "Seq2SeqTrainingArguments",
finetuning_args: "FinetuningArguments",
generating_args: "GeneratingArguments",
callbacks: Optional[list["TrainerCallback"]],
model: "AutoModelForCausalLMWithValueHead",
reward_model: Optional["AutoModelForCausalLMWithValueHead"],
ref_model: Optional["AutoModelForCausalLMWithValueHead"],
tokenizer: "PreTrainedTokenizer",
processor: Optional["ProcessorMixin"],
data_collator: "DataCollatorWithPadding",
train_dataset: Optional["Dataset"] = None,
eval_dataset: Optional["Dataset"] = None,
) -> None:
if eval_dataset is not None:
raise NotImplementedError("PPOTrainer does not support eval dataset yet.")
# Check if TRL version is compatible (0.8.6 <= version <= 0.9.6)
try:
from transformers.utils.versions import require_version
require_version(
"trl>=0.8.6,<=0.9.6",
"Incompatible TRL version detected. LLaMA-Factory ppo requires TRL version >=0.8.6,<=0.9.6. "
f"Found version {trl_version}. Please install the correct version with: `pip install trl>=0.8.6,<=0.9.6`\n"
"To fix: run `DISABLE_VERSION_CHECK=1 llamafactory-cli train example_ppo.yaml`\n",
)
except ImportError as e:
raise e
backward_batch_size = training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps
ppo_config = PPOConfig(
model_name=model_args.model_name_or_path,
learning_rate=training_args.learning_rate,
mini_batch_size=training_args.per_device_train_batch_size,
batch_size=backward_batch_size * finetuning_args.ppo_buffer_size,
gradient_accumulation_steps=training_args.gradient_accumulation_steps,
ppo_epochs=finetuning_args.ppo_epochs,
max_grad_norm=training_args.max_grad_norm,
seed=training_args.seed,
optimize_device_cache=True,
target=finetuning_args.ppo_target,
use_score_scaling=finetuning_args.ppo_score_norm,
use_score_norm=finetuning_args.ppo_score_norm,
whiten_rewards=finetuning_args.ppo_whiten_rewards,
accelerator_kwargs={"step_scheduler_with_optimizer": False},
log_with=training_args.report_to[0] if training_args.report_to else None,
project_kwargs={"logging_dir": training_args.logging_dir},
)
# Add deepspeed config
if training_args.deepspeed_plugin is not None:
ppo_config.accelerator_kwargs["kwargs_handlers"] = [
DistributedDataParallelKwargs(find_unused_parameters=training_args.ddp_find_unused_parameters)
]
ppo_config.accelerator_kwargs["deepspeed_plugin"] = training_args.deepspeed_plugin
if ppo_config.log_with is not None:
logger.warning_rank0("PPOTrainer cannot use external logger when DeepSpeed is enabled.")
ppo_config.log_with = None
# Create optimizer and scheduler
if training_args.max_steps > 0:
num_training_steps = training_args.max_steps
else:
total_train_batch_size = backward_batch_size * finetuning_args.ppo_buffer_size * training_args.world_size
num_training_steps = training_args.num_train_epochs * math.ceil(
len(train_dataset) / total_train_batch_size
)
optimizer = self.create_optimizer(model, training_args, finetuning_args)
scheduler = self.create_scheduler(training_args, num_training_steps, optimizer)
PPOTrainer.__init__(
self,
config=ppo_config,
model=model,
ref_model=ref_model,
tokenizer=tokenizer,
dataset=train_dataset,
optimizer=optimizer,
data_collator=data_collator,
lr_scheduler=scheduler,
)
self.args = training_args
self.model_args = model_args
self.finetuning_args = finetuning_args
self.reward_model = reward_model
self.current_device = get_current_device() # patch for deepspeed training
self.generation_config = GenerationConfig(
pad_token_id=self.tokenizer.pad_token_id,
eos_token_id=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids,
**generating_args.to_dict(),
)
self.state = TrainerState()
self.control = TrainerControl()
self.is_deepspeed_enabled = getattr(self.accelerator.state, "deepspeed_plugin", None) is not None
self.is_fsdp_enabled = getattr(self.accelerator.state, "fsdp_plugin", None) is not None
callbacks = DEFAULT_CALLBACKS if callbacks is None else DEFAULT_CALLBACKS + callbacks
self.callback_handler = CallbackHandler(
callbacks, self.accelerator.unwrap_model(self.model), self.tokenizer, self.optimizer, self.lr_scheduler
)
if self.args.max_steps > 0:
logger.info_rank0("max_steps is given, it will override any value given in num_train_epochs")
self.amp_context = torch.autocast(self.current_device.type)
warnings.simplefilter("ignore") # remove gc warnings on ref model
if finetuning_args.reward_model_type == "full":
if self.is_deepspeed_enabled:
if not (
getattr(reward_model.pretrained_model, "is_loaded_in_8bit", False)
or getattr(reward_model.pretrained_model, "is_loaded_in_4bit", False)
): # quantized models are already set on the correct device
self.reward_model = self._prepare_deepspeed(self.reward_model)
else:
self.reward_model = self.accelerator.prepare_model(self.reward_model, evaluation_mode=True)
self.add_callback(FixValueHeadModelCallback)
if processor is not None:
self.add_callback(SaveProcessorCallback(processor))
if finetuning_args.use_badam:
from badam import BAdamCallback, clip_grad_norm_old_version # type: ignore
self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
self.add_callback(BAdamCallback)
def ppo_train(self, resume_from_checkpoint: Optional[str] = None) -> None:
if resume_from_checkpoint is not None:
raise ValueError("`resume_from_checkpoint` will be supported in the future version.")
total_train_batch_size = (
self.args.per_device_train_batch_size
* self.args.gradient_accumulation_steps
* self.finetuning_args.ppo_buffer_size
* self.args.world_size
)
if self.args.max_steps > 0:
num_examples = total_train_batch_size * self.args.max_steps
num_train_epochs = sys.maxsize
max_steps = self.args.max_steps
steps_in_epoch = self.args.max_steps
else:
len_dataloader = len(self.dataloader)
num_examples = len(self.dataset)
num_train_epochs = self.args.num_train_epochs
max_steps = math.ceil(num_train_epochs * len_dataloader)
steps_in_epoch = len_dataloader
self.state.max_steps = max_steps
self.state.num_train_epochs = num_train_epochs
self.state.is_local_process_zero = self.is_local_process_zero()
self.state.is_world_process_zero = self.is_world_process_zero()
logger.info_rank0("***** Running training *****")
logger.info_rank0(f" Num examples = {num_examples:,}")
logger.info_rank0(f" Num Epochs = {num_train_epochs:,}")
logger.info_rank0(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size:,}")
logger.info_rank0(
f" Total train batch size (w. parallel, buffer, distributed & accumulation) = {total_train_batch_size:,}"
)
logger.info_rank0(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps:,}")
logger.info_rank0(f" Num optimization epochs per batch = {self.finetuning_args.ppo_epochs:,}")
logger.info_rank0(f" Total training steps = {max_steps:,}")
logger.info_rank0(f" Number of trainable parameters = {count_parameters(self.model)[0]:,}")
dataiter = iter(self.dataloader)
loss_meter = AverageMeter()
reward_meter = AverageMeter()
self.callback_handler.on_train_begin(self.args, self.state, self.control)
for step in tqdm(range(max_steps), disable=not self.is_local_process_zero()):
try:
batch = next(dataiter)
except StopIteration:
dataiter = iter(self.dataloader)
batch = next(dataiter)
# Get inputs
self.model.eval()
self.tokenizer.padding_side = "right" # change padding side
queries, responses, rewards = [], [], []
for idx in range(0, self.config.batch_size, self.config.mini_batch_size):
mini_batch = {
"input_ids": batch["input_ids"][idx : idx + self.config.mini_batch_size],
"attention_mask": batch["attention_mask"][idx : idx + self.config.mini_batch_size],
}
mini_batch_queries, mini_batch_responses = self.get_inputs(mini_batch)
mini_batch_rewards = self.get_rewards(mini_batch_queries, mini_batch_responses)
queries.extend(mini_batch_queries)
responses.extend(mini_batch_responses)
rewards.extend(mini_batch_rewards)
# Run PPO step
self.model.train()
stats = self.step(queries, responses, rewards)
self.tokenizer.padding_side = "left" # restore padding side
loss_meter.update(float(stats["ppo/loss/total"]), n=len(rewards))
reward_meter.update(torch.stack(rewards).mean().item(), n=len(rewards))
if self.config.log_with is not None:
try:
batch["query"] = self.tokenizer.batch_decode(queries, skip_special_tokens=True)
batch["response"] = self.tokenizer.batch_decode(responses, skip_special_tokens=True)
self.log_stats(stats, batch, rewards)
except Exception:
logger.warning_rank0("Failed to save stats due to unknown errors.")
self.state.global_step += 1
self.callback_handler.on_step_end(self.args, self.state, self.control)
if self.is_local_process_zero() and (step + 1) % self.args.logging_steps == 0:
logs = dict(
loss=round(loss_meter.avg, 4),
reward=round(reward_meter.avg, 4),
learning_rate=stats["ppo/learning_rate"],
epoch=round(step / steps_in_epoch, 2),
)
tqdm.write(str(logs))
logs["step"] = step
self.state.log_history.append(logs)
self.callback_handler.on_log(self.args, self.state, self.control, logs)
loss_meter.reset()
reward_meter.reset()
if (step + 1) % self.args.save_steps == 0: # save checkpoint
self.save_model(
os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
)
self.callback_handler.on_save(self.args, self.state, self.control)
if self.control.should_epoch_stop or self.control.should_training_stop:
break
self.callback_handler.on_train_end(self.args, self.state, self.control)
@override
def create_optimizer(
self,
model: "AutoModelForCausalLMWithValueHead",
training_args: "Seq2SeqTrainingArguments",
finetuning_args: "FinetuningArguments",
) -> "torch.optim.Optimizer":
optimizer = create_custom_optimizer(model, training_args, finetuning_args)
if optimizer is None:
decay_params, nodecay_params = [], []
decay_param_names = self.get_decay_parameter_names(model)
for name, param in model.named_parameters():
if param.requires_grad:
if name in decay_param_names:
decay_params.append(param)
else:
nodecay_params.append(param)
optim_class, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
param_groups = [
dict(params=nodecay_params),
dict(params=decay_params, weight_decay=training_args.weight_decay),
]
optimizer = optim_class(param_groups, **optim_kwargs)
return optimizer
@override
def create_scheduler(
self, training_args: "Seq2SeqTrainingArguments", num_training_steps: int, optimizer: "torch.optim.Optimizer"
) -> "torch.optim.lr_scheduler.LRScheduler":
create_custom_scheduler(training_args, num_training_steps, optimizer)
lr_scheduler = get_scheduler(
training_args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=training_args.get_warmup_steps(num_training_steps),
num_training_steps=num_training_steps,
)
return lr_scheduler
@torch.no_grad()
def get_inputs(self, batch: dict[str, "torch.Tensor"]) -> tuple[list["torch.Tensor"], list["torch.Tensor"]]:
if batch["input_ids"].size(0) == 1: # handle llama2 ppo with gradient accumulation > 1
start_index = (batch["input_ids"][0] != self.tokenizer.pad_token_id).nonzero()[0].item()
for k, v in batch.items():
batch[k] = v[:, start_index:]
with unwrap_model_for_generation(self.model, self.accelerator) as unwrapped_model:
unwrapped_model: AutoModelForCausalLMWithValueHead = self.accelerator.unwrap_model(self.model)
if self.model_args.upcast_layernorm:
layernorm_params = dump_layernorm(unwrapped_model)
generate_output: torch.Tensor = unwrapped_model.generate(
generation_config=self.generation_config, logits_processor=get_logits_processor(), **batch
)
if self.model_args.upcast_layernorm:
restore_layernorm(unwrapped_model, layernorm_params)
query = batch["input_ids"].detach().cpu()
response = generate_output[:, batch["input_ids"].size(-1) :].detach().cpu()
queries, responses = [], []
for i in range(len(query)):
query_start_index = (query[i] != self.tokenizer.pad_token_id).nonzero()[0].item()
response_indexes = (response[i] != self.tokenizer.pad_token_id).nonzero()
if len(response_indexes) == 0: # allow empty response
response_length = 1
elif self.tokenizer.eos_token_id == self.tokenizer.pad_token_id: # include eos token
response_length = response_indexes[-1].item() + 2
else:
response_length = response_indexes[-1].item() + 1
queries.append(query[i, query_start_index:]) # remove padding from left
responses.append(response[i, :response_length]) # remove padding from right
return queries, responses
@torch.no_grad()
def get_rewards(
self,
queries: list["torch.Tensor"],
responses: list["torch.Tensor"],
) -> list["torch.Tensor"]:
if self.finetuning_args.reward_model_type == "api":
token_ids = [torch.cat((q, r), dim=-1).tolist() for q, r in zip(queries, responses)]
messages = self.tokenizer.batch_decode(token_ids, skip_special_tokens=False)
return get_rewards_from_server(self.reward_model, messages)
batch: dict[str, torch.Tensor] = self.prepare_model_inputs(queries, responses)
unwrapped_model: AutoModelForCausalLMWithValueHead = self.accelerator.unwrap_model(self.model)
if self.finetuning_args.reward_model_type in ["lora", "oft"]:
replace_model(unwrapped_model, target="reward")
reward_model = self.model
else:
reward_model = self.reward_model
with unwrap_model_for_generation(reward_model, self.accelerator), self.amp_context: # support bf16
values: torch.Tensor = reward_model(**batch, return_dict=True, use_cache=False)[-1]
if self.finetuning_args.reward_model_type in ["lora", "oft"]:
replace_model(unwrapped_model, target="default")
rewards = values.gather(dim=-1, index=(batch["attention_mask"].sum(dim=-1, keepdim=True) - 1))
return rewards.float().detach() # use fp32 type
@override
def batched_forward_pass(
self,
model: "AutoModelForCausalLMWithValueHead",
queries: "torch.Tensor",
responses: "torch.Tensor",
model_inputs: dict[str, Any],
return_logits: bool = False,
response_masks: Optional["torch.Tensor"] = None,
) -> tuple["torch.Tensor", Optional["torch.Tensor"], "torch.Tensor", "torch.Tensor"]:
from trl.core import logprobs_from_logits
torch_gc()
bs = len(queries)
fbs = self.config.mini_batch_size
all_logprobs = []
all_logits = []
all_masks = []
all_values = []
for i in range(math.ceil(bs / fbs)):
input_kwargs = {key: value[i * fbs : (i + 1) * fbs] for key, value in model_inputs.items()}
query_batch = queries[i * fbs : (i + 1) * fbs]
response_batch = responses[i * fbs : (i + 1) * fbs]
if response_masks is not None:
response_masks_batch = response_masks[i * fbs : (i + 1) * fbs]
input_ids = input_kwargs["input_ids"]
attention_mask = input_kwargs["attention_mask"]
with self.amp_context: # support bf16
logits, _, values = model(**input_kwargs, return_dict=True, use_cache=False)
logprobs = logprobs_from_logits(logits[:, :-1, :], input_ids[:, 1:])
masks = torch.zeros_like(attention_mask)
masks[:, :-1] = attention_mask[:, 1:]
for j in range(len(query_batch)):
start = len(query_batch[j]) - 1
if attention_mask[j, 0] == 0: # offset left padding
start += attention_mask[j, :].nonzero()[0].item()
end = start + len(response_batch[j])
if response_masks is not None:
response_masks_batch = torch.cat((torch.zeros_like(query_batch[j]), response_masks_batch[j]))[1:]
masks[j, :start] = 0
masks[j, end:] = 0
if response_masks is not None:
masks[j, start:end] = masks[j, start:end] * response_masks_batch[j][start:end]
if return_logits:
all_logits.append(logits)
else:
del logits
all_values.append(values)
all_logprobs.append(logprobs)
all_masks.append(masks)
return (
torch.cat(all_logprobs),
torch.cat(all_logits)[:, :-1] if return_logits else None,
torch.cat(all_values)[:, :-1],
torch.cat(all_masks)[:, :-1],
)
@override
def save_model(self, output_dir: Optional[str] = None) -> None:
if output_dir is None:
output_dir = self.args.output_dir
if self.is_fsdp_enabled or self.is_deepspeed_enabled:
try:
state_dict = self.accelerator.get_state_dict(self.model) # must be called at all ranks
if self.args.should_save:
self._save(output_dir, state_dict=state_dict)
except ValueError:
logger.warning_rank0(
" stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead,"
" use zero_to_fp32.py to recover weights"
)
if self.args.should_save:
self._save(output_dir, state_dict={})
# remove the dummy state_dict
remove_dummy_checkpoint(self.args.should_save, output_dir, [WEIGHTS_NAME, SAFE_WEIGHTS_NAME])
self.model.save_checkpoint(output_dir)
elif self.args.should_save:
unwrapped_model: AutoModelForCausalLMWithValueHead = self.accelerator.unwrap_model(self.model)
self._save(output_dir, state_dict=unwrapped_model.state_dict()) | --- +++ @@ -62,6 +62,7 @@
class CustomPPOTrainer(PPOTrainer, Trainer):
+ r"""Inherit PPOTrainer."""
def __init__(
self,
@@ -197,6 +198,7 @@ self.add_callback(BAdamCallback)
def ppo_train(self, resume_from_checkpoint: Optional[str] = None) -> None:
+ r"""Implement training loop for the PPO stage, like _inner_training_loop() in Huggingface's Trainer."""
if resume_from_checkpoint is not None:
raise ValueError("`resume_from_checkpoint` will be supported in the future version.")
@@ -347,6 +349,7 @@
@torch.no_grad()
def get_inputs(self, batch: dict[str, "torch.Tensor"]) -> tuple[list["torch.Tensor"], list["torch.Tensor"]]:
+ r"""Generate model's responses given queries."""
if batch["input_ids"].size(0) == 1: # handle llama2 ppo with gradient accumulation > 1
start_index = (batch["input_ids"][0] != self.tokenizer.pad_token_id).nonzero()[0].item()
for k, v in batch.items():
@@ -388,6 +391,10 @@ queries: list["torch.Tensor"],
responses: list["torch.Tensor"],
) -> list["torch.Tensor"]:
+ r"""Compute scores using given reward model.
+
+ Both inputs and outputs are put on CPU.
+ """
if self.finetuning_args.reward_model_type == "api":
token_ids = [torch.cat((q, r), dim=-1).tolist() for q, r in zip(queries, responses)]
messages = self.tokenizer.batch_decode(token_ids, skip_special_tokens=False)
@@ -421,6 +428,10 @@ return_logits: bool = False,
response_masks: Optional["torch.Tensor"] = None,
) -> tuple["torch.Tensor", Optional["torch.Tensor"], "torch.Tensor", "torch.Tensor"]:
+ r"""Calculate model outputs in multiple batches.
+
+ Subclass and override to inject custom behavior.
+ """
from trl.core import logprobs_from_logits
torch_gc()
@@ -479,6 +490,10 @@
@override
def save_model(self, output_dir: Optional[str] = None) -> None:
+ r"""Save model checkpoint.
+
+ Subclass and override to inject custom behavior.
+ """
if output_dir is None:
output_dir = self.args.output_dir
@@ -500,4 +515,4 @@
elif self.args.should_save:
unwrapped_model: AutoModelForCausalLMWithValueHead = self.accelerator.unwrap_model(self.model)
- self._save(output_dir, state_dict=unwrapped_model.state_dict())+ self._save(output_dir, state_dict=unwrapped_model.state_dict())
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/train/ppo/trainer.py |
Generate missing documentation strings | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/trainer_seq2seq.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from functools import partial
from types import MethodType
from typing import TYPE_CHECKING, Any, Optional, Union
import numpy as np
import torch
from transformers import Seq2SeqTrainer
from typing_extensions import override
from ...extras import logging
from ...extras.constants import IGNORE_INDEX
from ..callbacks import SaveProcessorCallback
from ..fp8_utils import configure_fp8_environment, patch_accelerator_for_fp8, verify_fp8_status
from ..trainer_utils import create_custom_optimizer, create_custom_scheduler
if TYPE_CHECKING:
from torch.utils.data import Dataset
from transformers import ProcessorMixin
from transformers.trainer import PredictionOutput
from ...hparams import FinetuningArguments, ModelArguments, TrainingArguments
logger = logging.get_logger(__name__)
class CustomSeq2SeqTrainer(Seq2SeqTrainer):
def __init__(
self,
finetuning_args: "FinetuningArguments",
processor: Optional["ProcessorMixin"],
model_args: Optional["ModelArguments"] = None,
gen_kwargs: Optional[dict[str, Any]] = None,
ref_model: Optional["torch.nn.Module"] = None,
**kwargs,
) -> None:
kwargs["processing_class"] = kwargs.pop("tokenizer")
# Configure FP8 environment if enabled
training_args: TrainingArguments = kwargs.get("args")
if training_args.fp8:
configure_fp8_environment(training_args)
if getattr(training_args, "fp8_backend", "auto") == "te":
patch_accelerator_for_fp8()
super().__init__(**kwargs)
if processor is not None:
# avoid wrong loss under gradient accumulation
# https://github.com/huggingface/transformers/pull/36044#issuecomment-2746657112
self.model_accepts_loss_kwargs = False
self.finetuning_args = finetuning_args
if gen_kwargs is not None:
# https://github.com/huggingface/transformers/blob/v4.45.0/src/transformers/trainer_seq2seq.py#L287
self._gen_kwargs = gen_kwargs
if processor is not None:
self.add_callback(SaveProcessorCallback(processor))
if finetuning_args.use_badam:
from badam import BAdamCallback, clip_grad_norm_old_version # type: ignore
self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
self.add_callback(BAdamCallback)
self.ref_model = ref_model
if ref_model is not None:
from trl.models.utils import prepare_deepspeed, prepare_fsdp
if getattr(self.accelerator.state, "deepspeed_plugin", None) is not None:
if not (
getattr(ref_model, "is_loaded_in_8bit", False) or getattr(ref_model, "is_loaded_in_4bit", False)
): # quantized models are already set on the correct device
self.ref_model = prepare_deepspeed(self.ref_model, self.accelerator)
elif getattr(self.accelerator.state, "fsdp_plugin", None) is not None:
if self.accelerator.is_fsdp2:
from accelerate.utils.fsdp_utils import fsdp2_prepare_model
self.ref_model = fsdp2_prepare_model(self.accelerator, self.ref_model)
else:
self.ref_model = prepare_fsdp(self.ref_model, self.accelerator)
else:
self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)
self.ref_model.eval()
if finetuning_args.use_dft_loss:
from ..trainer_utils import dft_loss_func
self.compute_loss_func = dft_loss_func
elif finetuning_args.use_eaft_loss:
from ..trainer_utils import eaft_loss_func
self.compute_loss_func = lambda outputs, labels, num_items_in_batch=None: eaft_loss_func(
outputs, labels, num_items_in_batch, finetuning_args.eaft_alpha
)
elif finetuning_args.use_asft_loss:
from ..trainer_utils import asft_loss_func
self.compute_loss_func = partial(
asft_loss_func,
asft_alpha=finetuning_args.asft_alpha,
)
if training_args.fp8 and hasattr(self, "accelerator"): # verify FP8 status after trainer initialization
verify_fp8_status(self.accelerator, training_args)
@override
def create_optimizer(self) -> "torch.optim.Optimizer":
if self.optimizer is None:
self.optimizer = create_custom_optimizer(self.model, self.args, self.finetuning_args)
return super().create_optimizer()
@override
def create_scheduler(
self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None
) -> "torch.optim.lr_scheduler.LRScheduler":
create_custom_scheduler(self.args, num_training_steps, optimizer)
return super().create_scheduler(num_training_steps, optimizer)
@override
def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
if self.finetuning_args.disable_shuffling:
return torch.utils.data.SequentialSampler(self.train_dataset)
return super()._get_train_sampler(*args, **kwargs)
@override
def compute_loss(self, model, inputs, *args, **kwargs):
if self.finetuning_args.use_asft_loss:
with torch.no_grad():
ref_outputs = self.ref_model(
input_ids=inputs["input_ids"],
attention_mask=inputs.get("attention_mask", None),
)
ref_logits = ref_outputs.logits
outputs = model(**inputs)
return self.compute_loss_func(outputs, inputs["labels"], ref_logits)
else:
return super().compute_loss(model, inputs, *args, **kwargs)
@override
def prediction_step(
self,
model: "torch.nn.Module",
inputs: dict[str, Union["torch.Tensor", Any]],
prediction_loss_only: bool,
ignore_keys: Optional[list[str]] = None,
**gen_kwargs,
) -> tuple[Optional[float], Optional["torch.Tensor"], Optional["torch.Tensor"]]:
if self.args.predict_with_generate: # do not pass labels to model when generate
labels = inputs.pop("labels", None)
else:
labels = inputs.get("labels")
loss, generated_tokens, _ = super().prediction_step(
model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys, **gen_kwargs
)
if generated_tokens is not None and self.args.predict_with_generate:
generated_tokens[:, : inputs["input_ids"].size(-1)] = self.processing_class.pad_token_id
generated_tokens = generated_tokens.contiguous()
return loss, generated_tokens, labels
def save_predictions(
self, dataset: "Dataset", predict_results: "PredictionOutput", skip_special_tokens: bool = True
) -> None:
if not self.is_world_process_zero():
return
output_prediction_file = os.path.join(self.args.output_dir, "generated_predictions.jsonl")
logger.info_rank0(f"Saving prediction results to {output_prediction_file}")
labels = np.where(
predict_results.label_ids != IGNORE_INDEX, predict_results.label_ids, self.processing_class.pad_token_id
)
preds = np.where(
predict_results.predictions != IGNORE_INDEX,
predict_results.predictions,
self.processing_class.pad_token_id,
)
for i in range(len(preds)):
pad_len = np.nonzero(preds[i] != self.processing_class.pad_token_id)[0]
if len(pad_len): # move pad token to last
preds[i] = np.concatenate((preds[i][pad_len[0] :], preds[i][: pad_len[0]]), axis=-1)
input_ids_column = dataset["input_ids"]
try:
input_ids_list = input_ids_column.to_pylist()
except AttributeError:
input_ids_list = list(input_ids_column)
decoded_inputs = self.processing_class.batch_decode(input_ids_list, skip_special_tokens=False)
decoded_preds = self.processing_class.batch_decode(preds, skip_special_tokens=skip_special_tokens)
decoded_labels = self.processing_class.batch_decode(labels, skip_special_tokens=skip_special_tokens)
with open(output_prediction_file, "w", encoding="utf-8") as f:
for text, pred, label in zip(decoded_inputs, decoded_preds, decoded_labels):
f.write(json.dumps({"prompt": text, "predict": pred, "label": label}, ensure_ascii=False) + "\n") | --- +++ @@ -45,6 +45,7 @@
class CustomSeq2SeqTrainer(Seq2SeqTrainer):
+ r"""Inherits Seq2SeqTrainer to compute generative metrics such as BLEU and ROUGE."""
def __init__(
self,
@@ -169,6 +170,10 @@ ignore_keys: Optional[list[str]] = None,
**gen_kwargs,
) -> tuple[Optional[float], Optional["torch.Tensor"], Optional["torch.Tensor"]]:
+ r"""Remove the prompt part in the generated tokens.
+
+ Subclass and override to inject custom behavior.
+ """
if self.args.predict_with_generate: # do not pass labels to model when generate
labels = inputs.pop("labels", None)
else:
@@ -186,6 +191,10 @@ def save_predictions(
self, dataset: "Dataset", predict_results: "PredictionOutput", skip_special_tokens: bool = True
) -> None:
+ r"""Save model predictions to `output_dir`.
+
+ A custom behavior that not contained in Seq2SeqTrainer.
+ """
if not self.is_world_process_zero():
return
@@ -218,4 +227,4 @@
with open(output_prediction_file, "w", encoding="utf-8") as f:
for text, pred, label in zip(decoded_inputs, decoded_preds, decoded_labels):
- f.write(json.dumps({"prompt": text, "predict": pred, "label": label}, ensure_ascii=False) + "\n")+ f.write(json.dumps({"prompt": text, "predict": pred, "label": label}, ensure_ascii=False) + "\n")
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/train/sft/trainer.py |
Create docstrings for all classes and functions | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from ....utils.constants import IGNORE_INDEX
from ....utils.helper import get_tokenizer
from ....utils.types import Message, ModelInput, Processor, ToolCall
from ..rendering import RenderingPlugin
def _update_model_input(
processor: Processor,
input_ids: list[int],
labels: list[int],
loss_weights: list[int],
temp_str: str,
temp_weight: float,
) -> str:
if not temp_str:
return ""
tokenizer = get_tokenizer(processor)
temp_ids = tokenizer.encode(temp_str, add_special_tokens=False)
input_ids.extend(temp_ids)
loss_weights.extend([temp_weight] * len(temp_ids))
if temp_weight > 1e-6:
labels.extend(temp_ids)
else:
labels.extend([IGNORE_INDEX] * len(temp_ids))
return ""
def _concat_text_content(message: Message) -> str:
message_text = ""
for content in message["content"]:
if content["type"] == "text":
message_text += content["value"]
else:
raise ValueError(f"Unsupported content type: {content['type']}")
return message_text
@RenderingPlugin("qwen3_nothink").register("render_messages")
def render_qwen3_nothink_messages(
processor: Processor,
messages: list[Message],
tools: str | None = None,
is_generate: bool = False,
enable_thinking: bool = False,
) -> ModelInput:
input_ids, labels, loss_weights = [], [], []
temp_str, temp_weight = "", 0.0
if tools:
temp_str += "<|im_start|>system\n"
if messages[0]["role"] == "system":
temp_str += _concat_text_content(messages[0]) + "\n\n"
temp_weight = messages[0].get("loss_weight", 0.0)
temp_str += (
"# Tools\n\nYou may call one or more functions to assist with the user query.\n\n"
"You are provided with function signatures within <tools></tools> XML tags:\n<tools>"
)
try:
tools = json.loads(tools)
except json.JSONDecodeError:
raise ValueError(f"Invalid tools format: {str(tools)}.")
if not isinstance(tools, list):
tools = [tools]
for tool in tools:
temp_str += "\n" + json.dumps(tool, ensure_ascii=False)
temp_str += (
"\n</tools>\n\nFor each function call, return a json object with function name "
'and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{"name": '
'<function-name>, "arguments": <args-json-object>}\n</tool_call><|im_end|>\n'
)
elif messages[0]["role"] == "system":
temp_str += "<|im_start|>system\n" + _concat_text_content(messages[0]) + "<|im_end|>\n"
temp_weight = messages[0].get("loss_weight", 0.0)
temp_str = _update_model_input(processor, input_ids, labels, loss_weights, temp_str, temp_weight)
for turn_idx, message in enumerate(messages):
if message["role"] == "user" or (message["role"] == "system" and turn_idx != 0):
temp_str += "<|im_start|>" + message["role"] + "\n" + _concat_text_content(message) + "<|im_end|>\n"
temp_weight = message.get("loss_weight", 0.0)
elif message["role"] == "assistant":
temp_str += "<|im_start|>" + message["role"] + "\n"
for val_idx, content in enumerate(message["content"]):
if content["type"] == "text":
temp_str += content["value"]
elif content["type"] == "reasoning":
temp_str += "<thinking>\n" + content["value"] + "\n</thinking>\n\n" # avoid using special tokens
elif content["type"] == "tool_call":
if val_idx != 0 and message["content"][val_idx - 1]["type"] in ["text", "tool_call"]:
temp_str += "\n"
try:
tool_call: ToolCall = json.loads(content["value"])
except json.JSONDecodeError:
raise ValueError(f"Invalid tool call format: {content['value']}.")
temp_str += (
'<tool_call>\n{"name": "'
+ tool_call["name"]
+ '", "arguments": '
+ json.dumps(tool_call["arguments"], ensure_ascii=False)
+ "}\n</tool_call>"
)
else:
raise ValueError(f"Unsupported content type: {content['type']}")
temp_str += "<|im_end|>\n"
temp_weight = message.get("loss_weight", 1.0)
elif message["role"] == "tool":
if turn_idx == 0 or messages[turn_idx - 1]["role"] != "tool":
temp_str += "<|im_start|>user"
temp_str += "\n<tool_response>\n" + _concat_text_content(message) + "\n</tool_response>"
if turn_idx == len(messages) - 1 or messages[turn_idx + 1]["role"] != "tool":
temp_str += "<|im_end|>\n"
temp_weight = message.get("loss_weight", 0.0)
temp_str = _update_model_input(processor, input_ids, labels, loss_weights, temp_str, temp_weight)
if is_generate:
temp_str += "<|im_start|>assistant\n"
temp_weight = 0.0
if enable_thinking:
raise ValueError("The qwen3_nothink template does not support thinking mode.")
temp_str = _update_model_input(processor, input_ids, labels, loss_weights, temp_str, temp_weight)
attention_mask = [1] * len(input_ids)
return ModelInput(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
loss_weights=loss_weights,
)
@RenderingPlugin("qwen3_nothink").register("parse_message")
def parse_qwen3_nothink_message(generated_text: str) -> Message:
pattern = re.compile(r"<(thinking|tool_call)>\s*(.*?)\s*</\1>\s*", re.DOTALL)
content = []
last_end = 0
for match in pattern.finditer(generated_text):
start, end = match.span()
if start > last_end:
text = generated_text[last_end:start].strip()
if text:
content.append({"type": "text", "value": text})
tag_type = match.group(1)
tag_value = match.group(2).strip()
if tag_type == "thinking":
content.append({"type": "reasoning", "value": tag_value.strip()})
elif tag_type == "tool_call":
try:
json.loads(tag_value.strip())
except json.JSONDecodeError:
raise ValueError(f"Invalid tool call format: {tag_value.strip()}.")
content.append({"type": "tool_call", "value": tag_value.strip()})
last_end = end
if last_end < len(generated_text):
text = generated_text[last_end:].strip()
if text:
content.append({"type": "text", "value": text})
return Message(role="assistant", content=content) | --- +++ @@ -29,6 +29,7 @@ temp_str: str,
temp_weight: float,
) -> str:
+ """Update model input with temporary string."""
if not temp_str:
return ""
@@ -45,6 +46,7 @@
def _concat_text_content(message: Message) -> str:
+ """Concatenate text fields in a message."""
message_text = ""
for content in message["content"]:
if content["type"] == "text":
@@ -63,6 +65,10 @@ is_generate: bool = False,
enable_thinking: bool = False,
) -> ModelInput:
+ """Render messages in the Qwen3 nothink template format.
+
+ See https://huggingface.co/spaces/huggingfacejs/chat-template-playground?modelId=Qwen/Qwen3-4B-Instruct-2507
+ """
input_ids, labels, loss_weights = [], [], []
temp_str, temp_weight = "", 0.0
if tools:
@@ -162,6 +168,14 @@
@RenderingPlugin("qwen3_nothink").register("parse_message")
def parse_qwen3_nothink_message(generated_text: str) -> Message:
+ """Parse a message in the Qwen3 nothink template format. Supports interleaved reasoning and tool calls.
+
+ Args:
+ generated_text (str): The generated text in the Qwen3 nothink template format.
+
+ Returns:
+ Message: The parsed message.
+ """
pattern = re.compile(r"<(thinking|tool_call)>\s*(.*?)\s*</\1>\s*", re.DOTALL)
content = []
last_end = 0
@@ -192,4 +206,4 @@ if text:
content.append({"type": "text", "value": text})
- return Message(role="assistant", content=content)+ return Message(role="assistant", content=content)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/model_plugins/templates/qwen3_nothink.py |
Document functions with clear intent | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from typing import Any, Literal, NotRequired, TypedDict
from ...utils import logging
from ...utils.plugin import BasePlugin
from ...utils.types import DPOSample, Sample, SFTSample, ToolCall
logger = logging.get_logger(__name__)
class AlpacaSample(TypedDict, total=False):
system: NotRequired[str]
instruction: str
input: NotRequired[str]
output: str
SharegptMessage = TypedDict(
"SharegptMessage",
{"from": Literal["human", "gpt", "system", "function_call", "observation"], "value": str},
)
class SharegptSample(TypedDict, total=False):
conversations: list[SharegptMessage]
tools: NotRequired[str]
class OpenaiMessage(TypedDict, total=False):
role: Literal["user", "assistant", "tool"]
content: str
class OpenaiSample(TypedDict, total=False):
messages: list[OpenaiMessage]
class PairSample(TypedDict, total=False):
chosen: list[OpenaiMessage]
rejected: list[OpenaiMessage]
class DataConverterPlugin(BasePlugin):
def __call__(self, raw_sample: dict[str, Any]) -> Sample:
return super().__call__(raw_sample)
@DataConverterPlugin("alpaca").register()
def alpaca_converter(raw_sample: AlpacaSample) -> SFTSample:
messages = []
if "system" in raw_sample:
messages.append(
{"role": "system", "content": [{"type": "text", "value": raw_sample["system"]}], "loss_weight": 0.0}
)
if "instruction" in raw_sample or "input" in raw_sample:
messages.append(
{
"role": "user",
"content": [
{"type": "text", "value": raw_sample.get("instruction", "") + raw_sample.get("input", "")}
],
"loss_weight": 0.0,
}
)
if "output" in raw_sample:
messages.append(
{"role": "assistant", "content": [{"type": "text", "value": raw_sample["output"]}], "loss_weight": 1.0}
)
return {"messages": messages}
@DataConverterPlugin("sharegpt").register()
def sharegpt_converter(raw_sample: SharegptSample) -> SFTSample:
tag_mapping = {
"system": "system",
"human": "user",
"gpt": "assistant",
"observation": "tool",
"function_call": "assistant",
}
sample = {}
messages = []
for message in raw_sample.get("conversations", []):
tag = message["from"]
if tag not in tag_mapping:
logger.warning_rank0(f"Unsupported role tag {tag} in message: {message}")
elif tag == "function_call":
try:
tool_calls: ToolCall | list[ToolCall] = json.loads(message["value"])
except json.JSONDecodeError:
logger.warning_rank0(f"Invalid tool call format: {str(message['value'])}")
continue
if not isinstance(tool_calls, list):
tool_calls = [tool_calls]
messages.append(
{
"role": "assistant",
"content": [{"type": "tool_call", "value": json.dumps(tool_call)} for tool_call in tool_calls],
"loss_weight": 1.0,
}
)
else:
messages.append(
{
"role": tag_mapping[tag],
"content": [{"type": "text", "value": message["value"]}],
"loss_weight": 1.0 if tag == "gpt" else 0.0,
}
)
sample["messages"] = messages
tools = raw_sample.get("tools")
if tools:
try:
tools: list[dict[str, Any]] = json.loads(tools)
sample["tools"] = json.dumps(tools)
except json.JSONDecodeError:
logger.warning_rank0(f"Invalid tools format: {str(tools)}")
return sample
@DataConverterPlugin("pair").register()
def pair_converter(raw_sample: PairSample) -> DPOSample:
def process_message(raw_messages: list[OpenaiMessage]):
messages = []
for message in raw_messages:
if message["role"] == "tool":
try:
tool_calls: ToolCall | list[ToolCall] = json.loads(message["content"])
except json.JSONDecodeError:
logger.warning_rank0(f"Invalid tool call format: {str(message['content'])}")
continue
if not isinstance(tool_calls, list):
tool_calls = [tool_calls]
messages.append(
{
"role": message["role"],
"content": [{"type": "tool_call", "value": json.dumps(tool_call)} for tool_call in tool_calls],
"loss_weight": 1.0 if message["role"] == "assistant" else 0.0,
}
)
else:
messages.append(
{
"role": message["role"],
"content": [{"type": "text", "value": message["content"]}],
"loss_weight": 1.0 if message["role"] == "assistant" else 0.0,
}
)
return messages
sample = {}
sample["chosen_messages"] = process_message(raw_sample.get("chosen", []))
sample["rejected_messages"] = process_message(raw_sample.get("rejected", []))
tools = raw_sample.get("tools")
if tools:
try:
tools: list[dict[str, Any]] = json.loads(tools)
sample["tools"] = json.dumps(tools)
except json.JSONDecodeError:
logger.warning_rank0(f"Invalid tools format: {str(tools)}")
return sample | --- +++ @@ -57,6 +57,7 @@
class DataConverterPlugin(BasePlugin):
+ """Plugin for data converters."""
def __call__(self, raw_sample: dict[str, Any]) -> Sample:
return super().__call__(raw_sample)
@@ -64,6 +65,16 @@
@DataConverterPlugin("alpaca").register()
def alpaca_converter(raw_sample: AlpacaSample) -> SFTSample:
+ """Convert Alpaca sample to SFT sample.
+
+ See raw example at: https://huggingface.co/datasets/llamafactory/alpaca_gpt4_en
+
+ Args:
+ raw_sample (AlpacaSample): Alpaca sample.
+
+ Returns:
+ SFTSample: SFT sample.
+ """
messages = []
if "system" in raw_sample:
messages.append(
@@ -91,6 +102,16 @@
@DataConverterPlugin("sharegpt").register()
def sharegpt_converter(raw_sample: SharegptSample) -> SFTSample:
+ """Convert ShareGPT sample to SFT sample.
+
+ See raw example at: https://huggingface.co/datasets/llamafactory/glaive_toolcall_en
+
+ Args:
+ raw_sample (SharegptSample): ShareGPT sample.
+
+ Returns:
+ SFTSample: SFT sample.
+ """
tag_mapping = {
"system": "system",
"human": "user",
@@ -145,6 +166,16 @@
@DataConverterPlugin("pair").register()
def pair_converter(raw_sample: PairSample) -> DPOSample:
+ """Convert Pair sample to DPO sample.
+
+ See raw example at: https://huggingface.co/datasets/HuggingFaceH4/orca_dpo_pairs
+
+ Args:
+ raw_sample (PairSample): pair sample with chosen, rejected fields.
+
+ Returns:
+ DPOSample: DPO sample with chosen_messages and rejected_messages.
+ """
def process_message(raw_messages: list[OpenaiMessage]):
messages = []
@@ -189,4 +220,4 @@ except json.JSONDecodeError:
logger.warning_rank0(f"Invalid tools format: {str(tools)}")
- return sample+ return sample
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/data_plugins/converter.py |
Generate documentation strings for clarity | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/training_args.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from enum import StrEnum, unique
class PluginConfig(dict):
@property
def name(self) -> str:
if "name" not in self:
raise ValueError("Plugin configuration must have a 'name' field.")
return self["name"]
PluginArgument = PluginConfig | dict | str | None
@unique
class ModelClass(StrEnum):
LLM = "llm"
CLS = "cls"
OTHER = "other"
@unique
class SampleBackend(StrEnum):
HF = "hf"
VLLM = "vllm"
@unique
class BatchingStrategy(StrEnum):
NORMAL = "normal"
PADDING_FREE = "padding_free"
DYNAMIC_BATCHING = "dynamic_batching"
DYNAMIC_PADDING_FREE = "dynamic_padding_free"
def _convert_str_dict(data: dict) -> dict:
for key, value in data.items():
if isinstance(value, dict):
data[key] = _convert_str_dict(value)
elif isinstance(value, str):
if value.lower() in ("true", "false"):
data[key] = value.lower() == "true"
elif value.isdigit():
data[key] = int(value)
elif value.replace(".", "", 1).isdigit():
data[key] = float(value)
return data
def get_plugin_config(config: PluginArgument) -> PluginConfig | None:
if config is None:
return None
if isinstance(config, str) and config.startswith("{"):
config = json.loads(config)
config = _convert_str_dict(config)
if "name" not in config:
raise ValueError("Plugin configuration must have a 'name' field.")
return PluginConfig(config) | --- +++ @@ -21,9 +21,11 @@
class PluginConfig(dict):
+ """Dictionary that allows attribute access."""
@property
def name(self) -> str:
+ """Plugin name."""
if "name" not in self:
raise ValueError("Plugin configuration must have a 'name' field.")
@@ -35,6 +37,7 @@
@unique
class ModelClass(StrEnum):
+ """Auto class for model config."""
LLM = "llm"
CLS = "cls"
@@ -56,6 +59,14 @@
def _convert_str_dict(data: dict) -> dict:
+ """Parse string representation inside the dictionary.
+
+ Args:
+ data: The string or dictionary to convert.
+
+ Returns:
+ The converted dictionary.
+ """
for key, value in data.items():
if isinstance(value, dict):
data[key] = _convert_str_dict(value)
@@ -71,6 +82,14 @@
def get_plugin_config(config: PluginArgument) -> PluginConfig | None:
+ """Get the plugin configuration from the argument value.
+
+ Args:
+ config: The argument value to get the plugin configuration from.
+
+ Returns:
+ The plugin configuration.
+ """
if config is None:
return None
@@ -81,4 +100,4 @@ if "name" not in config:
raise ValueError("Plugin configuration must have a 'name' field.")
- return PluginConfig(config)+ return PluginConfig(config)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/config/arg_utils.py |
Generate consistent documentation across files | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
from typing import Any, Literal
from datasets import load_dataset
from ...utils.plugin import BasePlugin
from ...utils.types import DatasetInfo, HFDataset
class DataLoaderPlugin(BasePlugin):
def load(self, dataset_info: DatasetInfo) -> HFDataset:
path = dataset_info["path"]
split = dataset_info.get("split", "train")
streaming = dataset_info.get("streaming", False)
return super().__call__(path, split, streaming)
def _get_builder_name(path: str) -> Literal["arrow", "csv", "json", "parquet", "text"]:
filetype = os.path.splitext(path)[-1][1:]
if filetype in ["arrow", "csv", "json", "jsonl", "parquet", "txt"]:
return filetype.replace("jsonl", "json").replace("txt", "text")
else:
raise ValueError(f"Unknown dataset filetype: {filetype}.")
@DataLoaderPlugin("local").register()
def load_data_from_file(filepath: str, split: str, streaming: bool) -> HFDataset:
if os.path.isdir(filepath):
filetype = _get_builder_name(os.listdir(filepath)[0])
dataset = load_dataset(filetype, data_dir=filepath, split=split)
elif os.path.isfile(filepath):
filetype = _get_builder_name(filepath)
dataset = load_dataset(filetype, data_files=filepath, split=split)
else:
raise ValueError(f"Can not load dataset from {filepath}.")
if streaming: # faster when data is streamed from local files
dataset = dataset.to_iterable_dataset()
return dataset
def adjust_data_index(
data_index: list[tuple[str, int]], size: int | None, weight: float | None
) -> list[tuple[str, int]]:
if size is not None:
data_index = random.choices(data_index, k=size)
if weight is not None:
data_index = random.choices(data_index, k=int(len(data_index) * weight))
return data_index
def select_data_sample(
data_index: list[tuple[str, int]], index: slice | list[int] | Any
) -> tuple[str, int] | list[tuple[str, int]]:
if isinstance(index, slice):
return [data_index[i] for i in range(*index.indices(len(data_index)))]
elif isinstance(index, list):
return [data_index[i] for i in index]
else:
raise ValueError(f"Invalid index type {type(index)}.") | --- +++ @@ -24,6 +24,7 @@
class DataLoaderPlugin(BasePlugin):
+ """Plugin for loading dataset."""
def load(self, dataset_info: DatasetInfo) -> HFDataset:
path = dataset_info["path"]
@@ -33,6 +34,14 @@
def _get_builder_name(path: str) -> Literal["arrow", "csv", "json", "parquet", "text"]:
+ """Get dataset builder name.
+
+ Args:
+ path (str): Dataset path.
+
+ Returns:
+ Literal["arrow", "csv", "json", "parquet", "text"]: Dataset builder name.
+ """
filetype = os.path.splitext(path)[-1][1:]
if filetype in ["arrow", "csv", "json", "jsonl", "parquet", "txt"]:
return filetype.replace("jsonl", "json").replace("txt", "text")
@@ -60,6 +69,16 @@ def adjust_data_index(
data_index: list[tuple[str, int]], size: int | None, weight: float | None
) -> list[tuple[str, int]]:
+ """Adjust dataset index by size and weight.
+
+ Args:
+ data_index (list[tuple[str, int]]): List of (dataset_name, sample_index).
+ size (Optional[int]): Desired dataset size.
+ weight (Optional[float]): Desired dataset weight.
+
+ Returns:
+ list[tuple[str, int]]: Adjusted dataset index.
+ """
if size is not None:
data_index = random.choices(data_index, k=size)
@@ -72,9 +91,18 @@ def select_data_sample(
data_index: list[tuple[str, int]], index: slice | list[int] | Any
) -> tuple[str, int] | list[tuple[str, int]]:
+ """Select dataset samples.
+
+ Args:
+ data_index (list[tuple[str, int]]): List of (dataset_name, sample_index).
+ index (Union[slice, list[int], Any]): Index of dataset samples.
+
+ Returns:
+ Union[tuple[str, int], list[tuple[str, int]]]: Selected dataset samples.
+ """
if isinstance(index, slice):
return [data_index[i] for i in range(*index.indices(len(data_index)))]
elif isinstance(index, list):
return [data_index[i] for i in index]
else:
- raise ValueError(f"Invalid index type {type(index)}.")+ raise ValueError(f"Invalid index type {type(index)}.")
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/data_plugins/loader.py |
Document all public functions with docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import Literal, TypedDict, Union
import torch
from peft import LoraConfig, PeftModel, TaskType, get_peft_model
from ...config import InputArgument, get_args
from ...core.model_engine import ModelEngine
from ...utils import logging
from ...utils.plugin import BasePlugin
from ...utils.types import HFModel
logger = logging.get_logger(__name__)
class LoraConfigDict(TypedDict, total=False):
name: Literal["lora"]
"""Plugin name."""
r: int
"""Lora rank."""
lora_alpha: int
"""Lora alpha."""
lora_dropout: float
"""Lora dropout."""
target_modules: Union[list[str], str]
"""Target modules."""
use_rslora: bool
"""Use RS-LoRA."""
use_dora: bool
"""Use DoRA."""
modules_to_save: list[str]
"""Modules to save."""
adapter_name_or_path: Union[list[str], str]
"""Path to the adapter(s)."""
export_dir: str
"""Path to the export directory."""
export_size: int
"""Shard size for the export model."""
export_hub_model_id: str
"""Hub model ID for the export model."""
infer_dtype: Literal["auto", "float16", "float32", "bfloat16"]
"""Inference data type for the export model."""
export_legacy_format: bool
"""Use legacy format for the export model."""
class FreezeConfigDict(TypedDict, total=False):
name: Literal["freeze"]
"""Plugin name."""
freeze_trainable_layers: int
"""Freeze trainable layers."""
freeze_trainable_modules: Union[list[str], str]
"""Freeze trainable modules."""
freeze_extra_modules: list[str]
"""Freeze extra modules."""
cast_trainable_params_to_fp32: bool
"""Cast trainable params to fp32."""
class PeftPlugin(BasePlugin):
def __call__(self, model: HFModel, config: dict, is_train: bool) -> HFModel:
return super().__call__(model, config, is_train)
def _find_all_linear_modules(model: HFModel) -> list[str]:
forbidden_modules = {"lm_head", "output_layer", "output"}
module_names = set()
for name, module in model.named_modules():
if any(forbidden_module in name for forbidden_module in forbidden_modules):
continue
if "Linear" in module.__class__.__name__ and "Embedding" not in module.__class__.__name__:
module_names.add(name.split(".")[-1])
return list(module_names)
def merge_adapters(model: HFModel, adapter_name_or_path: Union[list[str], str]) -> HFModel:
if not isinstance(adapter_name_or_path, list):
adapter_name_or_path = [adapter_name_or_path]
for adapter_path in adapter_name_or_path:
model = PeftModel.from_pretrained(model, adapter_path)
model = model.merge_and_unload()
logger.info_rank0(f"Merged adapter from {adapter_path}")
return model
def load_adapter(model: HFModel, adapter_name_or_path: Union[list[str], str], is_train: bool) -> HFModel:
if not isinstance(adapter_name_or_path, list):
adapter_name_or_path = [adapter_name_or_path]
# TODO
# Adapters fix for deepspeed and quant
# Adapters fix for vision
if is_train and len(adapter_name_or_path) > 1:
raise ValueError(
"When `adapter_name_or_path` is provided for training, only a single LoRA adapter is supported. "
"Training will continue on the specified adapter. "
"Please merge multiple adapters before starting a new LoRA adapter."
)
if is_train:
adapter_to_merge = []
adapter_to_resume = adapter_name_or_path[0]
else:
adapter_to_merge = adapter_name_or_path
adapter_to_resume = None
if adapter_to_merge:
model = merge_adapters(model, adapter_to_merge)
if adapter_to_resume is not None:
model = PeftModel.from_pretrained(model, adapter_to_resume, is_trainable=is_train)
if is_train:
logger.info_rank0(
f"Resuming training from existing LoRA adapter at {adapter_to_resume}. "
"LoRA hyperparameters will be loaded from the adapter itself; "
"the current LoRA configuration will be ignored. "
"Merge the adapter into the base model before training if you want to start a new adapter."
)
return model
@PeftPlugin("lora").register()
def get_lora_model(model: HFModel, config: LoraConfigDict, is_train: bool = False) -> HFModel:
if model.device.type == "meta":
raise ValueError("Currently lora stage does not support loading model by meta.")
adapter_name_or_path = config.get("adapter_name_or_path")
if adapter_name_or_path:
return load_adapter(model, adapter_name_or_path, is_train)
logger.info_rank0("Fine-tuning method: LoRA")
target_modules = config.get("target_modules", "all")
# Handle target modules
if target_modules == "all":
target_modules = _find_all_linear_modules(model)
elif isinstance(target_modules, str):
target_modules = [target_modules]
logger.info_rank0(f"LoRA target modules: {target_modules}")
peft_config = LoraConfig(
task_type=TaskType.CAUSAL_LM,
inference_mode=not is_train,
r=config.get("r", 8),
lora_alpha=config.get("lora_alpha", 16),
lora_dropout=config.get("lora_dropout", 0.05),
use_rslora=config.get("use_rslora", False),
use_dora=config.get("use_dora", False),
target_modules=target_modules,
modules_to_save=config.get("modules_to_save", None),
)
model = get_peft_model(model, peft_config)
if is_train:
model.print_trainable_parameters()
return model
@PeftPlugin("freeze").register()
def get_freeze_model(model: HFModel, config: FreezeConfigDict, is_train: bool = False) -> HFModel:
logger.info_rank0("Fine-tuning method: Freeze")
if not is_train:
return model
freeze_trainable_layers = config.get("freeze_trainable_layers", 2)
freeze_trainable_modules = config.get("freeze_trainable_modules", ["all"])
freeze_extra_modules = config.get("freeze_extra_modules", [])
cast_trainable_params_to_fp32 = config.get("cast_trainable_params_to_fp32", True)
if isinstance(freeze_trainable_modules, str):
freeze_trainable_modules = [module.strip() for module in freeze_trainable_modules.split(",")]
if isinstance(freeze_extra_modules, str):
freeze_extra_modules = [module.strip() for module in freeze_extra_modules.split(",")]
# Get number of layers
num_layers = (
getattr(model.config, "num_hidden_layers", None)
or getattr(model.config, "num_layers", None)
or getattr(model.config, "n_layer", None)
)
if not num_layers:
raise ValueError("Current model does not support freeze tuning.")
if freeze_trainable_layers > 0:
# last n layers
trainable_layer_ids = range(max(0, num_layers - freeze_trainable_layers), num_layers)
else:
# first n layers
trainable_layer_ids = range(min(-freeze_trainable_layers, num_layers))
# Identify hidden and non-hidden modules
hidden_modules = set()
non_hidden_modules = set()
for name, _ in model.named_parameters():
if ".0." in name:
hidden_modules.add(name.split(".0.")[-1].split(".")[0])
elif ".1." in name:
hidden_modules.add(name.split(".1.")[-1].split(".")[0])
if re.search(r"\.\d+\.", name) is None:
non_hidden_modules.add(name.split(".")[-2])
# Build list of trainable layer patterns
trainable_layers = []
for module_name in freeze_trainable_modules:
if module_name == "all":
for idx in trainable_layer_ids:
trainable_layers.append(f".{idx:d}.")
elif module_name in hidden_modules:
for idx in trainable_layer_ids:
trainable_layers.append(f".{idx:d}.{module_name}")
else:
raise ValueError(f"Module {module_name} not found in hidden modules: {hidden_modules}")
# Add extra modules
if freeze_extra_modules:
for module_name in freeze_extra_modules:
if module_name in non_hidden_modules:
trainable_layers.append(module_name)
else:
raise ValueError(f"Module {module_name} not found in non-hidden modules: {non_hidden_modules}")
# TODO
# Multi-modal special handling
# Set requires_grad
forbidden_modules = {"quant_state", "quantization_weight", "qweight", "qzeros", "scales"}
for name, param in model.named_parameters():
if any(trainable_layer in name for trainable_layer in trainable_layers) and not any(
forbidden_module in name for forbidden_module in forbidden_modules
):
param.requires_grad_(True)
if cast_trainable_params_to_fp32:
param.data = param.data.to(torch.float32) # Cast to fp32 for stability
else:
param.requires_grad_(False)
logger.info_rank0(f"Set trainable layers: {trainable_layers}")
# Count trainable params for verification
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
all_params = sum(p.numel() for p in model.parameters())
logger.info_rank0(
f"trainable params: {trainable_params} || all params: {all_params} || trainable%: {100 * trainable_params / all_params:.4f}"
)
return model
def merge_and_export_model(args: InputArgument = None):
model_args, _, _, _ = get_args(args)
export_config = model_args.peft_config
if export_config is None:
raise ValueError("Please specify peft_config to merge and export model.")
export_dir = export_config.get("export_dir")
if export_dir is None:
raise ValueError("Please specify export_dir.")
export_size = export_config.get("export_size", 5)
export_hub_model_id = export_config.get("export_hub_model_id")
infer_dtype = export_config.get("infer_dtype", "auto")
export_legacy_format = export_config.get("export_legacy_format", False)
adapters = None
if export_config.get("name") == "lora":
adapters = export_config.get("adapter_name_or_path")
else:
raise ValueError("Currently merge and export model function is only supported for lora.")
if adapters is None:
raise ValueError("Please set adapter_name_or_path to merge adapters into base model.")
logger.info_rank0("Loading model for export...")
model_engine = ModelEngine(model_args, is_train=False)
model = model_engine.model
tokenizer = model_engine.processor
if infer_dtype == "auto":
if model.config.torch_dtype == torch.float32 and torch.cuda.is_bf16_supported():
model = model.to(torch.bfloat16)
logger.info_rank0("Converted model to bfloat16.")
else:
target_dtype = getattr(torch, infer_dtype)
model = model.to(target_dtype)
logger.info_rank0(f"Converted model to {infer_dtype}.")
logger.info_rank0(f"Exporting model to {export_dir}...")
model.save_pretrained(
export_dir,
max_shard_size=f"{export_size}GB",
safe_serialization=not export_legacy_format,
)
if tokenizer is not None:
try:
if hasattr(tokenizer, "padding_side"):
tokenizer.padding_side = "left"
tokenizer.save_pretrained(export_dir)
except Exception as e:
logger.warning(f"Failed to save tokenizer: {e}")
if export_hub_model_id:
logger.info_rank0(f"Pushing to hub: {export_hub_model_id}...")
model.push_to_hub(export_hub_model_id)
if tokenizer is not None:
tokenizer.push_to_hub(export_hub_model_id)
logger.info_rank0("Model exported successfully.") | --- +++ @@ -78,6 +78,7 @@
def _find_all_linear_modules(model: HFModel) -> list[str]:
+ r"""Find all available modules to apply LoRA."""
forbidden_modules = {"lm_head", "output_layer", "output"}
module_names = set()
for name, module in model.named_modules():
@@ -103,6 +104,13 @@
def load_adapter(model: HFModel, adapter_name_or_path: Union[list[str], str], is_train: bool) -> HFModel:
+ r"""Loads adapter(s) into the model.
+
+ Determine adapter usage based on mode:
+ - Training: Load the single adapter for continued training.
+ - Inference: Merge all adapters to clean up the model.
+ - Unmergeable: Keep the single adapter active without merging.
+ """
if not isinstance(adapter_name_or_path, list):
adapter_name_or_path = [adapter_name_or_path]
@@ -335,4 +343,4 @@ if tokenizer is not None:
tokenizer.push_to_hub(export_hub_model_id)
- logger.info_rank0("Model exported successfully.")+ logger.info_rank0("Model exported successfully.")
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/model_plugins/peft.py |
Add docstrings explaining edge cases | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's TRL library.
# https://github.com/huggingface/trl/blob/v0.8.0/trl/trainer/kto_trainer.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from collections import defaultdict
from contextlib import nullcontext
from types import MethodType
from typing import TYPE_CHECKING, Literal, Optional, Union
import torch
from transformers import Trainer
from trl import KTOTrainer
from trl.models.utils import prepare_deepspeed, prepare_fsdp
from trl.trainer import disable_dropout_in_model
from typing_extensions import override
from ...extras.constants import IGNORE_INDEX
from ...extras.packages import is_transformers_version_greater_than
from ..callbacks import SaveProcessorCallback
from ..trainer_utils import create_custom_optimizer, create_custom_scheduler, get_batch_logps, nested_detach
if TYPE_CHECKING:
from transformers import PreTrainedModel, ProcessorMixin
from ...hparams import FinetuningArguments
class CustomKTOTrainer(KTOTrainer):
def __init__(
self,
model: Union["PreTrainedModel", torch.nn.Module],
ref_model: Optional[Union["PreTrainedModel", torch.nn.Module]],
finetuning_args: "FinetuningArguments",
processor: Optional["ProcessorMixin"],
disable_dropout: bool = True,
**kwargs,
):
if is_transformers_version_greater_than("4.46"):
kwargs["processing_class"] = kwargs.pop("tokenizer")
if disable_dropout:
disable_dropout_in_model(model)
if ref_model is not None:
disable_dropout_in_model(ref_model)
self.finetuning_args = finetuning_args
self.reference_free = False
self.use_dpo_data_collator = True # hack to avoid warning
self.generate_during_eval = False # disable at evaluation
self.label_pad_token_id = IGNORE_INDEX
self.padding_value = 0
self.is_encoder_decoder = model.config.is_encoder_decoder
self.precompute_ref_log_probs = False
self._precomputed_train_ref_log_probs = False
self._precomputed_eval_ref_log_probs = False
self._peft_has_been_casted_to_bf16 = False
self.ref_model = ref_model
self._stored_metrics = defaultdict(lambda: defaultdict(list))
# kto hyperparams
self.beta = finetuning_args.pref_beta
self.desirable_weight = finetuning_args.kto_chosen_weight
self.undesirable_weight = finetuning_args.kto_rejected_weight
self.ftx_gamma = finetuning_args.pref_ftx
# trl
# Not all losses require a KL calculation
self.calculate_KL = True
if hasattr(self, "loss_type") and self.loss_type in ["apo_zero_unpaired"]:
self.calculate_KL = False
else:
self.loss_type = "kto"
Trainer.__init__(self, model=model, **kwargs)
self.model_accepts_loss_kwargs = False # overwrite trainer's default behavior
if not hasattr(self, "accelerator"):
raise AttributeError("Please update `transformers`.")
warnings.simplefilter("ignore") # remove gc warnings on ref model
if ref_model is not None:
if self.is_deepspeed_enabled:
if not (
getattr(ref_model, "is_loaded_in_8bit", False) or getattr(ref_model, "is_loaded_in_4bit", False)
): # quantized models are already set on the correct device
self.ref_model = prepare_deepspeed(self.ref_model, self.accelerator)
elif self.is_fsdp_enabled:
if self.accelerator.is_fsdp2:
from accelerate.utils.fsdp_utils import fsdp2_prepare_model
self.ref_model = fsdp2_prepare_model(self.accelerator, self.ref_model)
else:
self.ref_model = prepare_fsdp(self.ref_model, self.accelerator)
else:
self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)
self.ref_model.eval()
if processor is not None:
self.add_callback(SaveProcessorCallback(processor))
if finetuning_args.use_badam:
from badam import BAdamCallback, clip_grad_norm_old_version # type: ignore
self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
self.add_callback(BAdamCallback)
@override
def create_optimizer(self) -> "torch.optim.Optimizer":
if self.optimizer is None:
self.optimizer = create_custom_optimizer(self.model, self.args, self.finetuning_args)
return super().create_optimizer()
@override
def create_scheduler(
self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None
) -> "torch.optim.lr_scheduler.LRScheduler":
create_custom_scheduler(self.args, num_training_steps, optimizer)
return super().create_scheduler(num_training_steps, optimizer)
@override
def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
if self.finetuning_args.disable_shuffling:
return torch.utils.data.SequentialSampler(self.train_dataset)
return Trainer._get_train_sampler(self, *args, **kwargs)
@override
def get_batch_samples(self, *args, **kwargs):
return Trainer.get_batch_samples(self, *args, **kwargs)
@override
def forward(
self, model: "PreTrainedModel", batch: dict[str, "torch.Tensor"], prefix: Literal["", "kl_"] = ""
) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]:
batch = nested_detach(batch, clone=True) # avoid error
model_inputs = {
"input_ids": batch[f"{prefix}input_ids"],
"attention_mask": batch[f"{prefix}attention_mask"],
}
if f"{prefix}token_type_ids" in batch:
model_inputs["token_type_ids"] = batch[f"{prefix}token_type_ids"]
if "pixel_values" in batch:
model_inputs["pixel_values"] = batch["pixel_values"]
if "image_sizes" in batch:
model_inputs["image_sizes"] = batch["image_sizes"]
if "image_grid_thw" in batch:
model_inputs["image_grid_thw"] = batch["image_grid_thw"]
if "aspect_ratio_ids" in batch:
model_inputs["aspect_ratio_ids"] = batch["aspect_ratio_ids"]
if "aspect_ratio_mask" in batch:
model_inputs["aspect_ratio_mask"] = batch["aspect_ratio_mask"]
if f"{prefix}cross_attention_mask" in batch:
model_inputs["cross_attention_mask"] = batch[f"{prefix}cross_attention_mask"]
logits = model(**model_inputs, return_dict=True, use_cache=False).logits.to(torch.float32)
logps, valid_length = get_batch_logps(logits=logits, labels=batch[f"{prefix}labels"])
return logits, logps, logps / valid_length
@override
def concatenated_forward(
self, model: "PreTrainedModel", batch: dict[str, "torch.Tensor"]
) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor", "torch.Tensor"]:
target_logits, target_logps, target_logps_avg = self.forward(model, batch)
with torch.no_grad():
_, kl_logps, _ = self.forward(model, batch, prefix="kl_")
if len(target_logps) != len(batch["kto_tags"]):
raise ValueError("Mismatched shape of inputs and labels.")
chosen_logits = target_logits[batch["kto_tags"]]
chosen_logps = target_logps[batch["kto_tags"]]
rejected_logits = target_logits[~batch["kto_tags"]]
rejected_logps = target_logps[~batch["kto_tags"]]
chosen_logps_avg = target_logps_avg[batch["kto_tags"]]
return chosen_logps, rejected_logps, chosen_logits, rejected_logits, kl_logps, chosen_logps_avg
@override
def compute_reference_log_probs(
self, model: "PreTrainedModel", batch: dict[str, "torch.Tensor"]
) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]:
if self.ref_model is None:
ref_model = model
ref_context = self.accelerator.unwrap_model(model).disable_adapter()
else:
ref_model = self.ref_model
ref_context = nullcontext()
with torch.no_grad(), ref_context:
reference_chosen_logps, reference_rejected_logps, _, _, reference_kl_logps, _ = self.concatenated_forward(
ref_model, batch
)
return reference_chosen_logps, reference_rejected_logps, reference_kl_logps
@override
def get_batch_loss_metrics(
self,
model: "PreTrainedModel",
batch: dict[str, "torch.Tensor"],
) -> tuple["torch.Tensor", dict[str, "torch.Tensor"]]:
metrics = {}
(
policy_chosen_logps,
policy_rejected_logps,
policy_chosen_logits,
policy_rejected_logits,
policy_kl_logps,
policy_chosen_logps_avg,
) = self.concatenated_forward(model, batch)
reference_chosen_logps, reference_rejected_logps, reference_kl_logps = self.compute_reference_log_probs(
model, batch
)
losses, chosen_rewards, rejected_rewards, kl = self.kto_loss(
policy_chosen_logps,
policy_rejected_logps,
policy_kl_logps,
reference_chosen_logps,
reference_rejected_logps,
reference_kl_logps,
)
losses = losses.nanmean()
if self.ftx_gamma > 1e-6 and len(policy_chosen_logps) > 0: # remember to rescale
sft_loss = -policy_chosen_logps_avg
losses += self.ftx_gamma * sft_loss.nanmean() / len(policy_chosen_logps) * len(batch["labels"])
num_chosen = len(chosen_rewards)
num_rejected = len(rejected_rewards)
if num_chosen > 0:
metrics["rewards/chosen_sum"] = chosen_rewards.nansum().item()
metrics["logps/chosen_sum"] = policy_chosen_logps.nansum().item()
metrics["logits/chosen_sum"] = policy_chosen_logits.nansum().item()
metrics["count/chosen"] = float(num_chosen)
if num_rejected > 0:
metrics["rewards/rejected_sum"] = rejected_rewards.nansum().item()
metrics["logps/rejected_sum"] = policy_rejected_logps.nansum().item()
metrics["logits/rejected_sum"] = policy_rejected_logits.nansum().item()
metrics["count/rejected"] = float(num_rejected)
metrics["kl"] = kl.item()
return losses, metrics
@override
def compute_loss(
self, model: "PreTrainedModel", inputs: dict[str, "torch.Tensor"], return_outputs: bool = False, **kwargs
) -> Union["torch.Tensor", tuple["torch.Tensor", list["torch.Tensor"]]]:
return super().compute_loss(model, inputs, return_outputs)
@override
def log(self, logs: dict[str, float], *args, **kwargs) -> None:
# logs either has "loss" or "eval_loss"
train_eval = "train" if "loss" in logs else "eval"
prefix = "eval_" if train_eval == "eval" else ""
# Add averaged stored metrics to logs
key_list, metric_list = [], []
for key, metrics in self._stored_metrics[train_eval].items():
key_list.append(key)
metric_list.append(torch.tensor(metrics, dtype=torch.float).to(self.accelerator.device).sum().item())
del self._stored_metrics[train_eval]
if len(metric_list) < 9: # pad to for all reduce
for i in range(9 - len(metric_list)):
key_list.append(f"dummy_{i}")
metric_list.append(0.0)
metric_list = torch.tensor(metric_list, dtype=torch.float).to(self.accelerator.device)
metric_list = self.accelerator.reduce(metric_list, "sum").tolist()
metric_dict: dict[str, float] = dict(zip(key_list, metric_list))
for split in ["chosen", "rejected"]: # accumulate average metrics from sums and lengths
if f"count/{split}" in metric_dict:
for key in ("rewards", "logps", "logits"):
logs[f"{prefix}{key}/{split}"] = metric_dict[f"{key}/{split}_sum"] / metric_dict[f"count/{split}"]
del metric_dict[f"{key}/{split}_sum"]
del metric_dict[f"count/{split}"]
if f"{prefix}rewards/chosen" in logs and f"{prefix}rewards/rejected" in logs: # calculate reward margin
logs[f"{prefix}rewards/margins"] = logs[f"{prefix}rewards/chosen"] - logs[f"{prefix}rewards/rejected"]
for key, metric in metric_dict.items(): # add remaining items
if not key.startswith("dummy_"):
logs[key] = metric
return Trainer.log(self, logs, *args, **kwargs) | --- +++ @@ -134,6 +134,7 @@
@override
def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
+ r"""Replace the sequential sampler of KTO Trainer created by trl with the random sampler."""
if self.finetuning_args.disable_shuffling:
return torch.utils.data.SequentialSampler(self.train_dataset)
@@ -141,12 +142,14 @@
@override
def get_batch_samples(self, *args, **kwargs):
+ r"""Replace the method of KTO Trainer with the one of the standard Trainer."""
return Trainer.get_batch_samples(self, *args, **kwargs)
@override
def forward(
self, model: "PreTrainedModel", batch: dict[str, "torch.Tensor"], prefix: Literal["", "kl_"] = ""
) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]:
+ r"""Run forward pass and computes the log probabilities."""
batch = nested_detach(batch, clone=True) # avoid error
model_inputs = {
"input_ids": batch[f"{prefix}input_ids"],
@@ -199,6 +202,7 @@ def compute_reference_log_probs(
self, model: "PreTrainedModel", batch: dict[str, "torch.Tensor"]
) -> tuple["torch.Tensor", "torch.Tensor", "torch.Tensor"]:
+ r"""Compute log probabilities of the reference model."""
if self.ref_model is None:
ref_model = model
ref_context = self.accelerator.unwrap_model(model).disable_adapter()
@@ -219,6 +223,7 @@ model: "PreTrainedModel",
batch: dict[str, "torch.Tensor"],
) -> tuple["torch.Tensor", dict[str, "torch.Tensor"]]:
+ r"""Compute the DPO loss and other metrics for the given batch of inputs for train or test."""
metrics = {}
(
policy_chosen_logps,
@@ -266,10 +271,12 @@ def compute_loss(
self, model: "PreTrainedModel", inputs: dict[str, "torch.Tensor"], return_outputs: bool = False, **kwargs
) -> Union["torch.Tensor", tuple["torch.Tensor", list["torch.Tensor"]]]:
+ r"""Subclass and override to accept extra kwargs."""
return super().compute_loss(model, inputs, return_outputs)
@override
def log(self, logs: dict[str, float], *args, **kwargs) -> None:
+ r"""Log `logs` on the various objects watching training, including stored metrics."""
# logs either has "loss" or "eval_loss"
train_eval = "train" if "loss" in logs else "eval"
prefix = "eval_" if train_eval == "eval" else ""
@@ -302,4 +309,4 @@ if not key.startswith("dummy_"):
logs[key] = metric
- return Trainer.log(self, logs, *args, **kwargs)+ return Trainer.log(self, logs, *args, **kwargs)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/train/kto/trainer.py |
Add docstrings to clarify complex logic | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import types
import torch
from ......accelerator.helper import DeviceType
from ......utils.types import HFModel
from ...base import BaseKernel
from ...registry import register_kernel
try:
import torch_npu
except ImportError:
pass
def npu_swiglu_forward(self, hidden_state):
return self.down_proj(
torch_npu.npu_swiglu(torch.cat((self.gate_proj(hidden_state), self.up_proj(hidden_state)), dim=-1), dim=-1)
)
def _npu_swiglu_glm4_forward(self, hidden_states):
up_states = self.gate_up_proj(hidden_states)
gate, up_states = up_states.chunk(2, dim=-1)
return self.down_proj(torch_npu.npu_swiglu(torch.cat((gate, up_states), dim=-1), dim=-1))
def _npu_swiglu_gemma3ntext_forward(self, hidden_states):
gate_proj = self.gate_proj(hidden_states)
if self.activation_sparsity > 0.0:
gate_proj = self._gaussian_topk(gate_proj)
down_proj = self.down_proj(
torch_npu.npu_swiglu(torch.cat((gate_proj, self.up_proj(hidden_states)), dim=-1), dim=-1)
)
return down_proj
@register_kernel
class NpuSwiGluKernel(BaseKernel):
# just support apply to the following module layers
expect_modules = frozenset(
{
"Qwen3VLMoeTextMLP",
"Qwen3VLTextMLP",
"Qwen3OmniMoeThinkerTextMLP",
"Qwen3OmniMoeMLP",
"Qwen3OmniMoeTalkerTextMLP",
"Qwen3OmniMoeCode2WavMlp",
"Qwen3NextMLP",
"Qwen3MoeMLP",
"Qwen3MLP",
"Qwen2MLP",
"Qwen2MoeMLP",
"Qwen2_5_VLMLP",
"Qwen2_5OmniMLP",
"Llama4TextMLP",
"LlamaMLP",
"Glm4MLP",
"Glm4MoeMLP",
"Glm4vMoeTextMLP",
"Gemma3MLP",
"Gemma2MLP",
"Gemma3nTextMLP",
"Phi3MLP",
"DeepseekV2MLP",
"DeepseekV3MLP",
"SeedOssMLP",
}
)
_kernel_id = "npu_fused_swiglu"
_device = DeviceType.NPU
@classmethod
def apply(cls, **kwargs) -> "HFModel":
model = kwargs.get("model", None)
if model is None:
raise ValueError(f"HFModel instance is required for {cls.__name__}.")
if not cls.check_deps():
raise RuntimeError("torch_npu is not available but NpuSwiGluKernel was called.")
# Mapping of specific mlp modules to their corresponding kernel implementations
kernel_mapping = {
"Glm4MLP": _npu_swiglu_glm4_forward,
"Glm4vTextMLP": _npu_swiglu_glm4_forward,
"Phi3MLP": _npu_swiglu_glm4_forward,
"Gemma3nTextMLP": _npu_swiglu_gemma3ntext_forward,
}
swiglu_pattern = re.compile("MLP", re.IGNORECASE)
for name, module in model.named_modules():
# Match any module whose class name contains "MLP"
if (
re.search(swiglu_pattern, module.__class__.__name__)
and module.__class__.__name__ in cls.expect_modules
):
# Bind function as an instance method to preserve `self` semantics
# and replace the original forward
kernel_func = kernel_mapping.get(module.__class__.__name__, npu_swiglu_forward)
module.forward = types.MethodType(kernel_func, module)
return model | --- +++ @@ -12,6 +12,13 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""The definition of NPU fused SwiGLU kernels.
+
+Init Phase:
+1. Define SwiGLU forward functions.
+2. Register NPU fused SwiGLU kernel.
+
+"""
import re
import types
@@ -31,18 +38,45 @@
def npu_swiglu_forward(self, hidden_state):
+ """SwiGLU forward pass for NPU.
+
+ Args:
+ self: The MLP layer instance.
+ hidden_state (Tensor): Input hidden state.
+
+ Returns:
+ Tensor: Output of SwiGLU.
+ """
return self.down_proj(
torch_npu.npu_swiglu(torch.cat((self.gate_proj(hidden_state), self.up_proj(hidden_state)), dim=-1), dim=-1)
)
def _npu_swiglu_glm4_forward(self, hidden_states):
+ """SwiGLU forward pass for GLM4 on NPU.
+
+ Args:
+ self: The GLM4 MLP layer instance.
+ hidden_states (Tensor): Input hidden states.
+
+ Returns:
+ Tensor: Output of SwiGLU.
+ """
up_states = self.gate_up_proj(hidden_states)
gate, up_states = up_states.chunk(2, dim=-1)
return self.down_proj(torch_npu.npu_swiglu(torch.cat((gate, up_states), dim=-1), dim=-1))
def _npu_swiglu_gemma3ntext_forward(self, hidden_states):
+ """SwiGLU forward pass for Gemma3nText on NPU.
+
+ Args:
+ self: The Gemma3nText MLP layer instance.
+ hidden_states (Tensor): Input hidden states.
+
+ Returns:
+ Tensor: Output of SwiGLU.
+ """
gate_proj = self.gate_proj(hidden_states)
if self.activation_sparsity > 0.0:
gate_proj = self._gaussian_topk(gate_proj)
@@ -54,6 +88,7 @@
@register_kernel
class NpuSwiGluKernel(BaseKernel):
+ """NPU Kernel for fused SwiGLU activation."""
# just support apply to the following module layers
expect_modules = frozenset(
@@ -91,6 +126,18 @@
@classmethod
def apply(cls, **kwargs) -> "HFModel":
+ """Applies the NPU fused SwiGLU kernel to the model.
+
+ Args:
+ **kwargs: Keyword arguments containing the model.
+
+ Returns:
+ HFModel: The model with patched SwiGLU forward functions.
+
+ Raises:
+ ValueError: If the model is not provided.
+ RuntimeError: If dependencies are not met.
+ """
model = kwargs.get("model", None)
if model is None:
raise ValueError(f"HFModel instance is required for {cls.__name__}.")
@@ -118,4 +165,4 @@ kernel_func = kernel_mapping.get(module.__class__.__name__, npu_swiglu_forward)
module.forward = types.MethodType(kernel_func, module)
- return model+ return model
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/model_plugins/kernels/ops/mlp/npu_swiglu.py |
Create simple docstrings for beginners | # Copyright 2025 Bytedance Ltd. and the LlamaFactory team.
#
# This code is inspired by the Bytedance's VeOmni library.
# https://github.com/ByteDance-Seed/VeOmni/blob/v0.1.4/veomni/utils/dist_utils.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections.abc import Callable
from contextlib import contextmanager
from enum import StrEnum, unique
from functools import lru_cache, wraps
from typing import Optional
import numpy as np
import torch
import torch.distributed as dist
from ..utils.types import ProcessGroup, Tensor, TensorLike
@unique
class DeviceType(StrEnum):
CPU = "cpu"
CUDA = "cuda"
META = "meta"
MPS = "mps"
NPU = "npu"
XPU = "xpu"
@unique
class ReduceOp(StrEnum):
SUM = "sum"
MEAN = "mean"
MAX = "max"
MIN = "min"
def requires_accelerator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not hasattr(torch, "accelerator"):
raise RuntimeError("torch.accelerator is not available, please upgrade torch to 2.7.0 or higher.")
return fn(*args, **kwargs)
return wrapper
def is_distributed() -> bool:
return os.getenv("RANK") is not None
def get_rank() -> int:
return int(os.getenv("RANK", "0"))
def get_world_size() -> int:
return int(os.getenv("WORLD_SIZE", "1"))
def get_local_rank() -> int:
return int(os.getenv("LOCAL_RANK", "0"))
def get_local_world_size() -> int:
return int(os.getenv("LOCAL_WORLD_SIZE", "1"))
@lru_cache
@requires_accelerator
def get_current_accelerator(check_available: bool = True) -> torch.device:
accelerator = torch.accelerator.current_accelerator(check_available=check_available)
return accelerator or torch.device(DeviceType.CPU.value)
@lru_cache
@requires_accelerator
def get_device_count() -> int:
return torch.accelerator.device_count()
@requires_accelerator
def synchronize() -> None:
torch.accelerator.synchronize()
@requires_accelerator
def set_device_index() -> None:
if get_current_accelerator().type != DeviceType.CPU:
torch.accelerator.set_device_index(get_local_rank())
@requires_accelerator
def get_current_device() -> torch.device:
if get_current_accelerator().type == DeviceType.CPU:
return torch.device(DeviceType.CPU.value)
else:
return torch.device(type=get_current_accelerator().type, index=torch.accelerator.current_device_index())
def is_torch_cuda_available():
return get_current_accelerator().type == DeviceType.CUDA
def is_torch_mps_available():
return get_current_accelerator().type == DeviceType.MPS
def is_torch_npu_available():
return get_current_accelerator().type == DeviceType.NPU
def is_torch_xpu_available():
return get_current_accelerator().type == DeviceType.XPU
def operate_tensorlike(fn: Callable[[...], Tensor], data: TensorLike, **kwargs) -> TensorLike:
device = get_current_accelerator()
is_tensor = isinstance(data, torch.Tensor)
is_ndarray = isinstance(data, np.ndarray)
if is_tensor:
orig_device = data.device
data = data.to(device=device)
elif is_ndarray:
data = torch.from_numpy(data).to(device=device, dtype=torch.float)
else:
data = torch.tensor(data, dtype=torch.float, device=device)
result = fn(data, **kwargs)
if is_tensor:
return result.to(orig_device)
elif is_ndarray:
return result.cpu().numpy()
elif result.numel() == 1:
return result.item()
else:
return result.tolist()
def get_process_group_backend() -> str:
if get_current_accelerator().type == DeviceType.NPU:
return "hccl"
elif get_current_accelerator().type == DeviceType.CUDA:
return "nccl"
else:
return "gloo"
def all_gather(tensor: Tensor, group: Optional[ProcessGroup] = None) -> Tensor:
world_size = get_world_size()
output_tensor = torch.empty(world_size * tensor.numel(), dtype=tensor.dtype, device=tensor.device)
dist.all_gather_into_tensor(output_tensor, tensor, group=group)
return output_tensor.view(-1, *tensor.size())
def all_reduce(tensor: Tensor, op: ReduceOp = ReduceOp.MEAN, group: Optional[ProcessGroup] = None) -> Tensor:
reduce_ops = {
ReduceOp.MEAN: dist.ReduceOp.SUM,
ReduceOp.SUM: dist.ReduceOp.SUM,
ReduceOp.MAX: dist.ReduceOp.MAX,
ReduceOp.MIN: dist.ReduceOp.MIN,
}
dist.all_reduce(tensor, op=reduce_ops[op], group=group)
if op == ReduceOp.MEAN: # ReduceOp.AVG is not supported by the NPU backend
tensor /= dist.get_world_size(group=group)
return tensor
def broadcast(tensor: Tensor, src: int = 0, group: Optional[ProcessGroup] = None) -> Tensor:
dist.broadcast(tensor, src=src, group=group)
return tensor
@contextmanager
def main_process_first(local_only: bool = True) -> None:
if get_world_size() > 1:
is_main_process = get_local_rank() == 0 if local_only else get_rank() == 0
try:
if not is_main_process:
dist.barrier()
yield
finally:
if is_main_process:
dist.barrier()
else:
yield | --- +++ @@ -15,6 +15,14 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Utility functions used by the distributed interface.
+
+Including:
+- Environment info (rank, world_size, local_rank, etc.)
+- Accelerator info (device type, device count, etc.)
+- Collective communication operations (all_gather, all_reduce, broadcast)
+- Synchronize processes and ensure main-process-first execution order
+"""
import os
from collections.abc import Callable
@@ -49,6 +57,10 @@
def requires_accelerator(fn):
+ """Decorator to check if torch.accelerator is available.
+
+ Note: this api requires torch>=2.7.0, otherwise it will raise an AttributeError or RuntimeError
+ """
@wraps(fn)
def wrapper(*args, **kwargs):
@@ -61,28 +73,34 @@
def is_distributed() -> bool:
+ """Check if distributed environment is available."""
return os.getenv("RANK") is not None
def get_rank() -> int:
+ """Get rank."""
return int(os.getenv("RANK", "0"))
def get_world_size() -> int:
+ """Get world size."""
return int(os.getenv("WORLD_SIZE", "1"))
def get_local_rank() -> int:
+ """Get local rank."""
return int(os.getenv("LOCAL_RANK", "0"))
def get_local_world_size() -> int:
+ """Get local world size."""
return int(os.getenv("LOCAL_WORLD_SIZE", "1"))
@lru_cache
@requires_accelerator
def get_current_accelerator(check_available: bool = True) -> torch.device:
+ """Get current accelerator."""
accelerator = torch.accelerator.current_accelerator(check_available=check_available)
return accelerator or torch.device(DeviceType.CPU.value)
@@ -90,22 +108,26 @@ @lru_cache
@requires_accelerator
def get_device_count() -> int:
+ """Get the number of available devices."""
return torch.accelerator.device_count()
@requires_accelerator
def synchronize() -> None:
+ """Synchronize all processes."""
torch.accelerator.synchronize()
@requires_accelerator
def set_device_index() -> None:
+ """Set current accelerator index to local rank."""
if get_current_accelerator().type != DeviceType.CPU:
torch.accelerator.set_device_index(get_local_rank())
@requires_accelerator
def get_current_device() -> torch.device:
+ """Get current accelerator device."""
if get_current_accelerator().type == DeviceType.CPU:
return torch.device(DeviceType.CPU.value)
else:
@@ -113,22 +135,27 @@
def is_torch_cuda_available():
+ """Check if CUDA is available."""
return get_current_accelerator().type == DeviceType.CUDA
def is_torch_mps_available():
+ """Check if MPS is available."""
return get_current_accelerator().type == DeviceType.MPS
def is_torch_npu_available():
+ """Check if NPU is available."""
return get_current_accelerator().type == DeviceType.NPU
def is_torch_xpu_available():
+ """Check if XPU is available."""
return get_current_accelerator().type == DeviceType.XPU
def operate_tensorlike(fn: Callable[[...], Tensor], data: TensorLike, **kwargs) -> TensorLike:
+ """Operate tensorlike data on current accelerator."""
device = get_current_accelerator()
is_tensor = isinstance(data, torch.Tensor)
is_ndarray = isinstance(data, np.ndarray)
@@ -154,6 +181,7 @@
def get_process_group_backend() -> str:
+ """Get backend for init process group."""
if get_current_accelerator().type == DeviceType.NPU:
return "hccl"
elif get_current_accelerator().type == DeviceType.CUDA:
@@ -163,6 +191,7 @@
def all_gather(tensor: Tensor, group: Optional[ProcessGroup] = None) -> Tensor:
+ """Gathers the tensor from all ranks and stacks them at the first dim."""
world_size = get_world_size()
output_tensor = torch.empty(world_size * tensor.numel(), dtype=tensor.dtype, device=tensor.device)
dist.all_gather_into_tensor(output_tensor, tensor, group=group)
@@ -170,6 +199,7 @@
def all_reduce(tensor: Tensor, op: ReduceOp = ReduceOp.MEAN, group: Optional[ProcessGroup] = None) -> Tensor:
+ """Performs all reduce in the given process group."""
reduce_ops = {
ReduceOp.MEAN: dist.ReduceOp.SUM,
ReduceOp.SUM: dist.ReduceOp.SUM,
@@ -184,12 +214,14 @@
def broadcast(tensor: Tensor, src: int = 0, group: Optional[ProcessGroup] = None) -> Tensor:
+ """Broadcasts the tensor from the src process to all other processes."""
dist.broadcast(tensor, src=src, group=group)
return tensor
@contextmanager
def main_process_first(local_only: bool = True) -> None:
+ """A context manager for torch distributed environment to do something on the main process firstly."""
if get_world_size() > 1:
is_main_process = get_local_rank() == 0 if local_only else get_rank() == 0
try:
@@ -200,4 +232,4 @@ if is_main_process:
dist.barrier()
else:
- yield+ yield
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/accelerator/helper.py |
Fully document this Python code with docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import init_empty_weights
from transformers import AutoConfig, AutoProcessor
from ..accelerator.helper import DeviceType
from ..accelerator.interface import DistributedInterface
from ..config.model_args import ModelArguments, ModelClass
from ..utils import logging
from ..utils.types import HFConfig, HFModel, Processor
from .utils.rendering import Renderer
logger = logging.get_logger(__name__)
class ModelEngine:
def __init__(self, model_args: ModelArguments, is_train: bool = False) -> None:
self.args = model_args
"""Model arguments."""
self.is_train = is_train
"""Whether to train the model."""
self.processor = self._init_processor()
"""Tokenizer or multi-modal processor."""
self.renderer = Renderer(self.args.template, self.processor)
"""Renderer."""
self.model_config = self._init_model_config()
"""Model configuration."""
self.model = self._init_model()
"""HF model."""
def _init_processor(self) -> Processor:
return AutoProcessor.from_pretrained(
self.args.model,
trust_remote_code=self.args.trust_remote_code,
)
def _init_model_config(self) -> HFConfig:
return AutoConfig.from_pretrained(
self.args.model,
trust_remote_code=self.args.trust_remote_code,
)
def _init_model(self) -> HFModel:
if self.args.init_config is not None:
from ..plugins.model_plugins.initialization import InitPlugin
init_device = InitPlugin(self.args.init_config.name)()
else:
init_device = DistributedInterface().current_device
init_kwargs = {"device_map": init_device}
if self.args.quant_config is not None:
from ..plugins.model_plugins.quantization import QuantizationPlugin
init_kwargs = QuantizationPlugin(self.args.quant_config.name)(
init_kwargs=init_kwargs,
config=self.model_config,
tokenizer=self.processor,
model_args=self.args,
is_trainable=self.is_train,
)
if self.args.model_class == ModelClass.LLM:
from transformers import AutoModelForCausalLM, AutoModelForImageTextToText
if type(self.model_config) in AutoModelForImageTextToText._model_mapping.keys():
AutoClass = AutoModelForImageTextToText
else:
AutoClass = AutoModelForCausalLM
elif self.args.model_class == ModelClass.CLS:
from transformers import AutoModelForTokenClassification
AutoClass = AutoModelForTokenClassification
else:
from transformers import AutoModel
AutoClass = AutoModel
if init_device.type == DeviceType.META:
assert self.args.quant_config is None, "Quantization is not supported with meta device."
with init_empty_weights():
model = AutoClass.from_config(self.model_config)
else:
model = AutoClass.from_pretrained(
self.args.model,
config=self.model_config,
dtype="auto",
trust_remote_code=self.args.trust_remote_code,
**init_kwargs,
)
if self.args.peft_config is None:
if self.is_train:
logger.info_rank0("Fine-tuning mode: full tuning")
model = model.to(torch.float32)
else:
logger.info_rank0("Inference the original model")
else:
from ..plugins.model_plugins.peft import PeftPlugin
model = PeftPlugin(self.args.peft_config.name)(model, self.args.peft_config, self.is_train)
if self.args.kernel_config is not None:
from ..plugins.model_plugins.kernels.interface import KernelPlugin
model = KernelPlugin(self.args.kernel_config.name)(
model, include_kernels=self.args.kernel_config.get("include_kernels")
)
return model
if __name__ == "__main__":
"""
python -m llamafactory.v1.core.model_engine --model llamafactory/tiny-random-qwen2.5
"""
from ..config.arg_parser import get_args
model_args, *_ = get_args()
model_engine = ModelEngine(model_args=model_args)
print(model_engine.processor)
print(model_engine.model_config)
print(model_engine.model) | --- +++ @@ -12,6 +12,22 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""The definition of model engine.
+
+How to use:
+model_engine = ModelEngine(model_args, is_train=True)
+model_engine.processor: Get the tokenizer or multi-modal processor.
+model_engine.renderer: Get the renderer.
+model_engine.model_config: Get the model configuration.
+model_engine.model: Get the HF model.
+
+Init workflow:
+1. Init processor.
+2. Init render.
+2. Init model config.
+3. Init model.
+4. Init adapter.
+"""
import torch
from accelerate import init_empty_weights
@@ -29,6 +45,12 @@
class ModelEngine:
+ """Model engine.
+
+ Args:
+ model_args: Model arguments.
+ is_train: Whether to train the model.
+ """
def __init__(self, model_args: ModelArguments, is_train: bool = False) -> None:
self.args = model_args
@@ -45,18 +67,29 @@ """HF model."""
def _init_processor(self) -> Processor:
+ """Init processor.
+
+ NOTE: Transformers v5 always use fast tokenizer.
+ https://github.com/huggingface/transformers/blob/v5.0.0rc1/src/transformers/models/auto/tokenization_auto.py#L642
+ """
return AutoProcessor.from_pretrained(
self.args.model,
trust_remote_code=self.args.trust_remote_code,
)
def _init_model_config(self) -> HFConfig:
+ """Init model config."""
return AutoConfig.from_pretrained(
self.args.model,
trust_remote_code=self.args.trust_remote_code,
)
def _init_model(self) -> HFModel:
+ """Init model.
+
+ Transformers can choose the proper model init context.
+ https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/modeling_utils.py#L3538
+ """
if self.args.init_config is not None:
from ..plugins.model_plugins.initialization import InitPlugin
@@ -138,4 +171,4 @@ model_engine = ModelEngine(model_args=model_args)
print(model_engine.processor)
print(model_engine.model_config)
- print(model_engine.model)+ print(model_engine.model)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/core/model_engine.py |
Add docstrings following best practices | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from contextlib import nullcontext
from typing import TYPE_CHECKING, Literal, Optional
import torch
from transformers.integrations import is_deepspeed_zero3_enabled
from ...extras.packages import is_requests_available
if is_requests_available():
import requests
if TYPE_CHECKING:
from transformers import PreTrainedModel
from trl import AutoModelForCausalLMWithValueHead
def get_rewards_from_server(server_url: str, messages: list[str]) -> list["torch.Tensor"]:
headers = {"Content-Type": "application/json"}
payload = {"model": "model", "messages": messages}
response = requests.post(server_url, json=payload, headers=headers)
rewards = json.loads(response.text)["scores"]
return torch.Tensor(rewards)
def replace_model(model: "AutoModelForCausalLMWithValueHead", target: Literal["default", "reward"]) -> None:
v_head_layer = model.v_head.summary
if is_deepspeed_zero3_enabled():
import deepspeed # type: ignore
params = [v_head_layer.weight, v_head_layer.bias]
context_maybe_zero3 = deepspeed.zero.GatheredParameters(params, modifier_rank=0)
else:
context_maybe_zero3 = nullcontext()
model.pretrained_model.set_adapter(target) # set the LoRA adapter to be active
with context_maybe_zero3:
if target == "reward": # save default head temporarily
setattr(model, "default_head_weight", v_head_layer.weight.data.detach().clone())
setattr(model, "default_head_bias", v_head_layer.bias.data.detach().clone())
device = v_head_layer.weight.device
v_head_layer.weight.data = model.get_buffer(f"{target}_head_weight").detach().clone().to(device)
v_head_layer.bias.data = model.get_buffer(f"{target}_head_bias").detach().clone().to(device)
def dump_layernorm(model: "PreTrainedModel") -> dict[str, "torch.Tensor"]:
layer_norm_params = {}
for name, param in model.named_parameters():
if param.data.dtype == torch.float32:
layer_norm_params[name] = param.data.detach().clone()
param.data = param.data.to(model.config.torch_dtype)
return layer_norm_params
def restore_layernorm(model: "PreTrainedModel", layernorm_params: Optional[dict[str, "torch.Tensor"]] = None) -> None:
for name, param in model.named_parameters():
if name in layernorm_params:
param.data = layernorm_params[name] | --- +++ @@ -32,6 +32,7 @@
def get_rewards_from_server(server_url: str, messages: list[str]) -> list["torch.Tensor"]:
+ r"""Get reward scores from the API server."""
headers = {"Content-Type": "application/json"}
payload = {"model": "model", "messages": messages}
response = requests.post(server_url, json=payload, headers=headers)
@@ -40,6 +41,7 @@
def replace_model(model: "AutoModelForCausalLMWithValueHead", target: Literal["default", "reward"]) -> None:
+ r"""Replace the default/reward modules in the model. The model is already unwrapped."""
v_head_layer = model.v_head.summary
if is_deepspeed_zero3_enabled():
import deepspeed # type: ignore
@@ -61,6 +63,7 @@
def dump_layernorm(model: "PreTrainedModel") -> dict[str, "torch.Tensor"]:
+ r"""Dump the layernorm parameters in the model. The model is already unwrapped (and gathered)."""
layer_norm_params = {}
for name, param in model.named_parameters():
if param.data.dtype == torch.float32:
@@ -71,6 +74,7 @@
def restore_layernorm(model: "PreTrainedModel", layernorm_params: Optional[dict[str, "torch.Tensor"]] = None) -> None:
+ r"""Restore the layernorm parameters in the model. The model is already unwrapped (and gathered)."""
for name, param in model.named_parameters():
if name in layernorm_params:
- param.data = layernorm_params[name]+ param.data = layernorm_params[name]
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/train/ppo/ppo_utils.py |
Add clean documentation to messy code | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import types
from typing import TYPE_CHECKING, Any, Optional
from ..extras import logging
if TYPE_CHECKING:
from ..hparams import TrainingArguments
logger = logging.get_logger(__name__)
def create_fp8_kwargs(training_args: "TrainingArguments") -> list[Any]:
if not training_args.fp8:
return []
backend = getattr(training_args, "fp8_backend", "auto")
logger.info_rank0(f"Creating FP8 configuration with backend: {backend}")
try:
# Use Transformer Engine backend (optimal for Hopper GPUs)
if backend == "te":
from accelerate.utils import FP8RecipeKwargs
logger.info_rank0("Using Transformer Engine FP8 backend")
return [FP8RecipeKwargs(backend="TE", fp8_format="HYBRID", amax_history_len=16, amax_compute_algo="max")]
# Use TorchAO backend (default)
from accelerate.utils import AORecipeKwargs
# Create Float8LinearConfig if torchao backend is used
config = None
if backend == "torchao" or backend == "auto":
from torchao.float8 import Float8LinearConfig
# Use rowwise scaling for better performance (as recommended by torchao)
# Configure alignment requirements for FP8 kernels
config = Float8LinearConfig.from_recipe_name("rowwise")
# Enable alignment for better kernel performance
if hasattr(config, "enable_amax_init"):
config.enable_amax_init = True
if hasattr(config, "enable_pre_and_post_forward"):
config.enable_pre_and_post_forward = True
# Create module filter function to skip problematic layers
# TorchAO FP8 requires dimensions divisible by 16 for optimal kernels
def module_filter_func(module, layer_name):
# Skip embedding and output layers for numerical stability
skip_layers = ["embed", "lm_head", "output", "classifier"]
if any(skip_name in layer_name.lower() for skip_name in skip_layers):
return False
# Only convert Linear layers
if not (hasattr(module, "weight") and len(module.weight.shape) == 2):
return False
# Check dimension alignment for FP8 kernels
weight = module.weight
in_features, out_features = weight.shape[1], weight.shape[0]
# Skip layers with dimensions not divisible by 16 to avoid kernel errors
if in_features % 16 != 0 or out_features % 16 != 0:
logger.debug(
f"Skipping layer {layer_name} with dimensions {out_features}x{in_features} (not divisible by 16)"
)
return False
return True
# Map FSDP all-gather setting if available (this affects the underlying implementation)
if (
hasattr(training_args, "fp8_enable_fsdp_float8_all_gather")
and training_args.fp8_enable_fsdp_float8_all_gather
):
logger.info_rank0("FSDP float8 all-gather optimization requested")
return [AORecipeKwargs(config=config, module_filter_func=module_filter_func)]
except Exception as e:
logger.info_rank0(f"Failed to create FP8 configuration: {e}")
return []
def get_fp8_mixed_precision(training_args: "TrainingArguments") -> Optional[str]:
return "fp8" if training_args.fp8 else None
def configure_fp8_environment(training_args: "TrainingArguments") -> None:
if not training_args.fp8:
return
# Set mixed precision to fp8 for HuggingFace Accelerate
os.environ["ACCELERATE_MIXED_PRECISION"] = "fp8"
logger.info_rank0("Set ACCELERATE_MIXED_PRECISION=fp8")
# Configure FP8 backend and options
backend = getattr(training_args, "fp8_backend", "auto")
if backend != "auto":
os.environ["FP8_BACKEND"] = backend
logger.info_rank0(f"Set FP8_BACKEND={backend}")
# Create and validate FP8 recipe kwargs (for logging/debugging)
fp8_kwargs = create_fp8_kwargs(training_args)
logger.info_rank0(f"FP8 AORecipeKwargs created: {len(fp8_kwargs)} items")
# Enable FSDP float8 all-gather optimization if requested
if hasattr(training_args, "fp8_enable_fsdp_float8_all_gather") and training_args.fp8_enable_fsdp_float8_all_gather:
os.environ["FP8_ENABLE_FSDP_FLOAT8_ALL_GATHER"] = "true"
logger.info_rank0("Set FP8_ENABLE_FSDP_FLOAT8_ALL_GATHER=true")
logger.info_rank0("FP8 environment configured - all FP8 training handled by HuggingFace Accelerate")
def verify_fp8_status(accelerator, training_args: "TrainingArguments") -> None:
if not training_args.fp8:
return
# Check Accelerate's FP8 status
fp8_enabled = getattr(accelerator, "fp8_enabled", False)
fp8_backend_type = getattr(accelerator, "fp8_backend", "UNKNOWN")
backend = getattr(training_args, "fp8_backend", "auto")
if backend == "torchao" or backend == "auto":
logger.info_rank0(
"FP8 training enabled with TorchAO backend. For optimal performance, "
"ensure model layer dimensions are mostly divisible by 16. "
"If you encounter issues, try fp8_backend='te' with Transformer Engine."
)
else:
logger.info_rank0(f"FP8 training enabled with {backend} backend.")
logger.info_rank0(f"Accelerate FP8 status - enabled: {fp8_enabled}, backend: {fp8_backend_type}")
if not fp8_enabled:
logger.info_rank0("WARNING: FP8 was requested but Accelerate shows fp8_enabled=False. FP8 may not be working.")
def patch_accelerator_for_fp8() -> None:
import transformer_engine.pytorch as te
from accelerate import Accelerator
# Guard against multiple patches
if getattr(Accelerator, "_te_fp8_patched", False):
return
# Stub for Accelerate 1.12+ compatibility (te.fp8.check_mxfp8_support doesn't exist yet)
if not hasattr(te, "fp8"):
te.fp8 = types.ModuleType("fp8")
te.fp8.check_mxfp8_support = lambda: (False, "MXFP8 not supported")
try:
from accelerate.utils import TERecipeKwargs as FP8Recipe
use_te_recipe = True
except ImportError:
from accelerate.utils import FP8RecipeKwargs as FP8Recipe
use_te_recipe = False
original_init = Accelerator.__init__
def patched_init(self, *args, **kwargs):
if "kwargs_handlers" not in kwargs or not kwargs["kwargs_handlers"]:
if use_te_recipe:
kwargs["kwargs_handlers"] = [
FP8Recipe(fp8_format="HYBRID", amax_history_len=16, amax_compute_algo="max")
]
else:
kwargs["kwargs_handlers"] = [
FP8Recipe(backend="TE", fp8_format="HYBRID", amax_history_len=16, amax_compute_algo="max")
]
# Only force mixed_precision when we inject handlers
kwargs["mixed_precision"] = "fp8"
return original_init(self, *args, **kwargs)
Accelerator.__init__ = patched_init
Accelerator._te_fp8_patched = True | --- +++ @@ -27,6 +27,14 @@
def create_fp8_kwargs(training_args: "TrainingArguments") -> list[Any]:
+ """Create AORecipeKwargs for FP8 training with HuggingFace Accelerate.
+
+ Args:
+ training_args: Training arguments containing FP8 configuration
+
+ Returns:
+ List containing AORecipeKwargs if FP8 is enabled and supported, empty list otherwise
+ """
if not training_args.fp8:
return []
@@ -98,10 +106,27 @@
def get_fp8_mixed_precision(training_args: "TrainingArguments") -> Optional[str]:
+ """Get the mixed precision setting for Accelerate when using FP8.
+
+ Args:
+ training_args: Training arguments containing FP8 configuration
+
+ Returns:
+ "fp8" if FP8 is enabled, None otherwise
+ """
return "fp8" if training_args.fp8 else None
def configure_fp8_environment(training_args: "TrainingArguments") -> None:
+ """Configure FP8 environment for HuggingFace Accelerate.
+
+ FP8 training is handled entirely through HuggingFace Accelerate, regardless of whether
+ DeepSpeed or FSDP is used for distributed training. This function sets up the environment
+ variables and validates the FP8 configuration.
+
+ Args:
+ training_args: Training arguments containing FP8 configuration
+ """
if not training_args.fp8:
return
@@ -128,6 +153,12 @@
def verify_fp8_status(accelerator, training_args: "TrainingArguments") -> None:
+ """Verify that FP8 training is actually working after model preparation.
+
+ Args:
+ accelerator: The HuggingFace Accelerator instance
+ training_args: Training arguments containing FP8 configuration
+ """
if not training_args.fp8:
return
@@ -152,6 +183,11 @@
def patch_accelerator_for_fp8() -> None:
+ """Patch Accelerator to inject FP8 recipe kwargs.
+
+ This is needed because HuggingFace Trainer doesn't pass kwargs_handlers to Accelerator.
+ We monkey-patch Accelerator.__init__ to inject the FP8 recipe and force mixed_precision='fp8'.
+ """
import transformer_engine.pytorch as te
from accelerate import Accelerator
@@ -190,4 +226,4 @@ return original_init(self, *args, **kwargs)
Accelerator.__init__ = patched_init
- Accelerator._te_fp8_patched = True+ Accelerator._te_fp8_patched = True
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/train/fp8_utils.py |
Create docstrings for all classes and functions | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from pathlib import Path
from ....utils import logging
from ....utils.plugin import BasePlugin
from ....utils.types import HFModel
from .registry import Registry
logger = logging.get_logger(__name__)
def scan_all_kernels():
ops_path = Path(__file__).parent / "ops"
if not ops_path.exists():
return
base_package = __package__
for file_path in ops_path.rglob("*.py"):
if file_path.name == "__init__.py":
continue
# calculate the relative path:
# file_path = .../kernels_v2/ops/mlp/npu_swiglu.py
# rel_path = ops/mlp/npu_swiglu.py
rel_path = file_path.relative_to(Path(__file__).parent)
# build module path:
module_name = ".".join(rel_path.parts)[:-3]
full_module_name = f"{base_package}.{module_name}"
try:
importlib.import_module(full_module_name)
except Exception as e:
logger.warning(f"[Kernel Registry] Failed to import {full_module_name} when loading kernels: {e}")
return Registry.get_registered_kernels()
default_kernels = scan_all_kernels()
def get_default_kernels():
return list(default_kernels.keys())
def apply_kernel(kernel_id: str, **kwargs):
kernel = default_kernels.get(kernel_id)
if kernel is None:
raise ValueError(f"Kernel {kernel_id} not found")
kernel.apply(**kwargs)
class KernelPlugin(BasePlugin):
pass
@KernelPlugin("auto").register()
def apply_default_kernels(model: HFModel, include_kernels: str = None) -> HFModel:
if not include_kernels:
return model
elif include_kernels == "auto" or include_kernels is True:
use_kernels = default_kernels.keys()
else:
use_kernels = include_kernels.split(",") # "kernel_id1,kernel_id2,kernel_id3"
for kernel in use_kernels:
if kernel not in default_kernels:
raise ValueError(f"Kernel {kernel} not found")
apply_kernel(kernel, model=model)
return model | --- +++ @@ -12,6 +12,14 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""The definition of kernel interface.
+
+Init Phase:
+1. Scan all kernels.
+2. Register default kernels.
+3. Define kernel plugin.
+
+"""
import importlib
from pathlib import Path
@@ -26,6 +34,18 @@
def scan_all_kernels():
+ """Scan all kernels in the ``ops`` directory.
+
+ Scans the ``ops`` directory for all ``.py`` files and attempts to import them.
+ Importing triggers the :func:`~registry.register_kernel` decorator, which automatically registers the kernels.
+
+ Returns:
+ dict[str, type[BaseKernel]]: A dictionary of registered kernels.
+
+ .. note::
+ This function assumes that the ``ops`` directory is located in the same directory as this file.
+ It recursively searches for ``.py`` files and constructs the module path for import.
+ """
ops_path = Path(__file__).parent / "ops"
if not ops_path.exists():
@@ -58,10 +78,25 @@
def get_default_kernels():
+ """Get a list of default registered kernel IDs.
+
+ Returns:
+ list[str]: List of kernel IDs.
+ """
return list(default_kernels.keys())
def apply_kernel(kernel_id: str, **kwargs):
+ """Applies a specific kernel to the model.
+
+ Args:
+ kernel_id (str): The ID of the kernel to apply.
+ **kwargs: Keyword arguments passed to the kernel application function.
+ Typically includes the model instance.
+
+ Returns:
+ HFModel: The model with applied kernel.
+ """
kernel = default_kernels.get(kernel_id)
if kernel is None:
raise ValueError(f"Kernel {kernel_id} not found")
@@ -70,12 +105,25 @@
class KernelPlugin(BasePlugin):
+ """Plugin for managing kernel optimizations."""
pass
@KernelPlugin("auto").register()
def apply_default_kernels(model: HFModel, include_kernels: str = None) -> HFModel:
+ """Applies all default registered kernels to the model.
+
+ Args:
+ model (HFModel): The model instance to apply kernels to.
+ include_kernels (str, optional): Comma-separated list of kernel IDs to apply.
+ If "auto" or True, applies all default kernels.
+ If None or False, no kernels are applied.
+ Defaults to None.
+
+ Returns:
+ HFModel: The model with applied kernels.
+ """
if not include_kernels:
return model
elif include_kernels == "auto" or include_kernels is True:
@@ -89,4 +137,4 @@
apply_kernel(kernel, model=model)
- return model+ return model
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/model_plugins/kernels/interface.py |
Write docstrings that follow conventions | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
from abc import ABC, abstractmethod
from collections.abc import AsyncGenerator
from threading import Thread
import torch
from transformers import AsyncTextIteratorStreamer
from ...accelerator.interface import DistributedInterface
from ...config import ModelArguments, SampleArguments
from ...utils.helper import get_tokenizer
from ...utils.types import HFModel, Message, Sample, TorchDataset
from .rendering import Renderer
class BaseEngine(ABC):
@abstractmethod
def __init__(
self,
args: SampleArguments,
model_args: ModelArguments,
model: HFModel,
renderer: Renderer,
) -> None:
...
@abstractmethod
async def generate(self, messages: list[Message], tools: str | None = None) -> AsyncGenerator[str, None]:
...
@abstractmethod
async def batch_infer(self, dataset: TorchDataset) -> list[Sample]:
...
class HuggingFaceEngine(BaseEngine):
def __init__(
self,
args: SampleArguments,
model_args: ModelArguments,
model: HFModel,
renderer: Renderer,
) -> None:
self.args = args
self.model_args = model_args
self.model = model
self.renderer = renderer
self.semaphore = asyncio.Semaphore(int(os.getenv("MAX_CONCURRENT", "1")))
@torch.inference_mode()
async def generate(self, messages: list[Message], tools: str | None = None) -> AsyncGenerator[str, None]:
async with self.semaphore:
model_inputs = self.renderer.render_messages(messages, tools, is_generate=True)
streamer = AsyncTextIteratorStreamer(
tokenizer=get_tokenizer(self.renderer.processor),
skip_prompt=True,
skip_special_tokens=True, # TODO: configurable
)
device = DistributedInterface().current_device
kwargs = {
"input_ids": torch.tensor([model_inputs["input_ids"]]).to(device),
"attention_mask": torch.tensor([model_inputs["attention_mask"]]).to(device),
"max_new_tokens": self.args.max_new_tokens,
"streamer": streamer,
}
thread = Thread(target=self.model.generate, kwargs=kwargs, daemon=True)
thread.start()
async for token in streamer:
yield token
async def batch_infer(self, dataset: TorchDataset) -> list[Sample]:
raise NotImplementedError("Batch infer is not implemented.") | --- +++ @@ -37,14 +37,39 @@ model: HFModel,
renderer: Renderer,
) -> None:
+ """Initialize the engine.
+
+ Args:
+ args: Sample arguments.
+ model_args: Model arguments.
+ model: Model.
+ renderer: Renderer.
+ """
...
@abstractmethod
async def generate(self, messages: list[Message], tools: str | None = None) -> AsyncGenerator[str, None]:
+ """Generate tokens asynchronously.
+
+ Args:
+ messages: List of messages.
+ tools: Tools string.
+
+ Yields:
+ Generated tokens.
+ """
...
@abstractmethod
async def batch_infer(self, dataset: TorchDataset) -> list[Sample]:
+ """Batch infer samples.
+
+ Args:
+ dataset: Torch dataset.
+
+ Returns:
+ List of samples.
+ """
...
@@ -85,4 +110,12 @@ yield token
async def batch_infer(self, dataset: TorchDataset) -> list[Sample]:
- raise NotImplementedError("Batch infer is not implemented.")+ """Batch infer samples.
+
+ Args:
+ dataset: Torch dataset.
+
+ Returns:
+ List of samples.
+ """
+ raise NotImplementedError("Batch infer is not implemented.")
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/core/utils/inference_engine.py |
Improve my code by adding docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ...utils.constants import IGNORE_INDEX
from ...utils.helper import get_tokenizer
from ...utils.types import Message, ModelInput, Processor, Sample
def render_chatml_messages(
processor: Processor,
messages: list[Message],
tools: str | None = None,
is_generate: bool = False,
) -> ModelInput:
tokenizer = get_tokenizer(processor)
input_ids, labels, loss_weights = [], [], []
for message in messages:
temp_str = "<|im_start|>" + message["role"] + "\n"
for content in message["content"]:
if content["type"] == "text":
temp_str += content["value"]
else:
raise ValueError(f"Unsupported content type: {content['type']}")
temp_str += "<|im_end|>\n"
temp_weight = message.get("loss_weight", 1.0 if message["role"] == "assistant" else 0.0)
temp_ids = tokenizer.encode(temp_str, add_special_tokens=False)
input_ids.extend(temp_ids)
loss_weights.extend([temp_weight] * len(temp_ids))
if temp_weight > 1e-6:
labels.extend(temp_ids)
else:
labels.extend([IGNORE_INDEX] * len(temp_ids))
if is_generate:
temp_ids = tokenizer.encode("<|im_start|>assistant\n", add_special_tokens=False)
input_ids.extend(temp_ids)
loss_weights.extend([0.0] * len(temp_ids))
labels.extend([IGNORE_INDEX] * len(temp_ids))
return ModelInput(
input_ids=input_ids,
attention_mask=[1] * len(input_ids),
labels=labels,
loss_weights=loss_weights,
)
def parse_chatml_message(generated_text: str) -> Message:
return Message(role="assistant", content=[{"type": "text", "value": generated_text}])
class Renderer:
def __init__(self, template: str, processor: Processor):
self.template = template
self.processor = processor
def render_messages(
self,
messages: list[Message],
tools: str | None = None,
is_generate: bool = False,
enable_thinking: bool = False,
) -> ModelInput:
if self.template == "chatml":
return render_chatml_messages(self.processor, messages, tools, is_generate)
else:
from ...plugins.model_plugins.rendering import RenderingPlugin
return RenderingPlugin(self.template).render_messages(
self.processor, messages, tools, is_generate, enable_thinking
)
def parse_message(self, generated_text: str) -> Message:
if self.template == "chatml":
return parse_chatml_message(generated_text)
else:
from ...plugins.model_plugins.rendering import RenderingPlugin
return RenderingPlugin(self.template).parse_message(generated_text)
def process_samples(self, samples: list[Sample]) -> list[ModelInput]:
model_inputs = []
for sample in samples:
if "messages" in sample:
model_input = self.render_messages(sample["messages"], sample.get("tools"))
elif "chosen_messages" in sample and "rejected_messages" in sample:
chosen_input = self.render_messages(sample["chosen_messages"], sample.get("tools"))
rejected_input = self.render_messages(sample["rejected_messages"], sample.get("tools"))
chosen_input["token_type_ids"] = [1] * len(chosen_input["input_ids"])
rejected_input["token_type_ids"] = [2] * len(rejected_input["input_ids"])
model_input = ModelInput(
input_ids=chosen_input["input_ids"] + rejected_input["input_ids"],
attention_mask=chosen_input["attention_mask"] + rejected_input["attention_mask"],
labels=chosen_input["labels"] + rejected_input["labels"],
loss_weights=chosen_input["loss_weights"] + rejected_input["loss_weights"],
token_type_ids=chosen_input["token_type_ids"] + rejected_input["token_type_ids"],
)
if "position_ids" in chosen_input:
model_input["position_ids"] = np.concatenate(
[chosen_input["position_ids"], rejected_input["position_ids"]], axis=-1
)
else:
raise ValueError("No valid messages or chosen_messages/rejected_messages found in sample.")
if "extra_info" in sample:
model_input["extra_info"] = sample["extra_info"]
if "_dataset_name" in sample:
model_input["_dataset_name"] = sample["_dataset_name"]
model_inputs.append(model_input)
return model_inputs | --- +++ @@ -12,6 +12,14 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""Rendering utils.
+
+How to use:
+renderer = Renderer(template, processor)
+renderer.render_messages(messages: list[Message], tools: str | None) -> ModelInputs
+renderer.parse_message(text: str) -> Message
+renderer.process_samples(samples: list[Sample]) -> list[ModelInput]
+"""
import numpy as np
@@ -26,6 +34,10 @@ tools: str | None = None,
is_generate: bool = False,
) -> ModelInput:
+ """Apply chatml template to messages and convert them to model input.
+
+ See https://huggingface.co/spaces/huggingfacejs/chat-template-playground?modelId=Qwen/Qwen2-7B-Instruct
+ """
tokenizer = get_tokenizer(processor)
input_ids, labels, loss_weights = [], [], []
@@ -62,6 +74,14 @@
def parse_chatml_message(generated_text: str) -> Message:
+ """Parse a message in ChatML format.
+
+ Args:
+ generated_text (str): The generated text in ChatML format.
+
+ Returns:
+ Message: The parsed message.
+ """
return Message(role="assistant", content=[{"type": "text", "value": generated_text}])
@@ -77,6 +97,17 @@ is_generate: bool = False,
enable_thinking: bool = False,
) -> ModelInput:
+ """Apply template to messages and convert them to model input.
+
+ Args:
+ messages (list[Message]): The messages to render.
+ tools (str | None, optional): The tools to use. Defaults to None.
+ is_generate (bool, optional): Whether to render for generation. Defaults to False.
+ enable_thinking (bool, optional): Whether to enable thinking mode for generation. Defaults to False.
+
+ Returns:
+ ModelInput: The rendered model input.
+ """
if self.template == "chatml":
return render_chatml_messages(self.processor, messages, tools, is_generate)
else:
@@ -87,6 +118,14 @@ )
def parse_message(self, generated_text: str) -> Message:
+ """Parse a message in the template format.
+
+ Args:
+ generated_text (str): The generated text in the template format.
+
+ Returns:
+ Message: The parsed message.
+ """
if self.template == "chatml":
return parse_chatml_message(generated_text)
else:
@@ -95,6 +134,14 @@ return RenderingPlugin(self.template).parse_message(generated_text)
def process_samples(self, samples: list[Sample]) -> list[ModelInput]:
+ """Process samples to model input.
+
+ Args:
+ samples (list[Sample]): The samples to process.
+
+ Returns:
+ list[ModelInput]: The processed model inputs.
+ """
model_inputs = []
for sample in samples:
if "messages" in sample:
@@ -126,4 +173,4 @@
model_inputs.append(model_input)
- return model_inputs+ return model_inputs
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/core/utils/rendering.py |
Write docstrings for data processing functions | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from ...utils import logging
from ...utils.plugin import BasePlugin
from ...utils.types import Message, ModelInput, Processor
logger = logging.get_logger(__name__)
class RenderingPlugin(BasePlugin):
_attempted_template_imports: set[str] = set()
def _ensure_template_imported(self) -> None:
if self.name is None or self.name in self._attempted_template_imports:
return
full_module_name = f"{__package__}.templates.{self.name}"
self._attempted_template_imports.add(self.name)
try:
importlib.import_module(full_module_name)
except Exception as exc:
logger.warning(f"[Template Registry] Failed to import {full_module_name}: {exc}")
def __getitem__(self, method_name: str):
self._ensure_template_imported()
return super().__getitem__(method_name)
def render_messages(
self,
processor: Processor,
messages: list[Message],
tools: str | None = None,
is_generate: bool = False,
enable_thinking: bool = False,
) -> ModelInput:
return self["render_messages"](processor, messages, tools, is_generate, enable_thinking)
def parse_messages(self, generated_text: str) -> Message:
return self["parse_messages"](generated_text) | --- +++ @@ -48,7 +48,9 @@ is_generate: bool = False,
enable_thinking: bool = False,
) -> ModelInput:
+ """Render messages in the template format."""
return self["render_messages"](processor, messages, tools, is_generate, enable_thinking)
def parse_messages(self, generated_text: str) -> Message:
- return self["parse_messages"](generated_text)+ """Parse messages in the template format."""
+ return self["parse_messages"](generated_text)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/model_plugins/rendering.py |
Provide clean and structured docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import AsyncGenerator
from ..config import ModelArguments, SampleArguments, SampleBackend
from ..utils.types import HFModel, Message, Sample, TorchDataset
from .utils.inference_engine import HuggingFaceEngine
from .utils.rendering import Renderer
class BaseSampler:
def __init__(
self,
args: SampleArguments,
model_args: ModelArguments,
model: HFModel,
renderer: Renderer,
) -> None:
if args.sample_backend == SampleBackend.HF:
self.engine = HuggingFaceEngine(args, model_args, model, renderer)
else:
raise ValueError(f"Unknown sample backend: {args.sample_backend}")
async def generate(self, messages: list[Message], tools: str | None = None) -> AsyncGenerator[str, None]:
async for token in self.engine.generate(messages, tools):
yield token
async def batch_infer(self, dataset: TorchDataset) -> list[Sample]:
return await self.engine.batch_infer(dataset) | --- +++ @@ -21,6 +21,14 @@
class BaseSampler:
+ """Base sampler.
+
+ Args:
+ args: Sample arguments.
+ model_args: Model arguments.
+ model: Model.
+ renderer: Renderer.
+ """
def __init__(
self,
@@ -35,8 +43,25 @@ raise ValueError(f"Unknown sample backend: {args.sample_backend}")
async def generate(self, messages: list[Message], tools: str | None = None) -> AsyncGenerator[str, None]:
+ """Generate tokens asynchronously.
+
+ Args:
+ messages: List of messages.
+ tools: Tools string.
+
+ Yields:
+ Generated tokens.
+ """
async for token in self.engine.generate(messages, tools):
yield token
async def batch_infer(self, dataset: TorchDataset) -> list[Sample]:
- return await self.engine.batch_infer(dataset)+ """Batch infer samples.
+
+ Args:
+ dataset: Torch dataset.
+
+ Returns:
+ List of samples.
+ """
+ return await self.engine.batch_infer(dataset)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/core/base_sampler.py |
Provide docstrings following PEP 257 | # Copyright 2025 Bytedance Ltd. and the LlamaFactory team.
#
# This code is inspired by the Bytedance's VeOmni library.
# https://github.com/ByteDance-Seed/VeOmni/blob/v0.1.4/veomni/distributed/parallel_state.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from datetime import timedelta
from enum import StrEnum
from typing import Any, Optional
from torch.distributed import barrier, destroy_process_group, init_process_group
from torch.distributed.device_mesh import DeviceMesh, init_device_mesh
from ..utils import logging
from ..utils.types import DistributedConfig, ProcessGroup, TensorLike
from . import helper
logger = logging.get_logger(__name__)
class Dim(StrEnum):
MP_REPLICATE = "mp_replicate"
MP_SHARD = "mp_shard"
DP = "dp"
CP = "cp"
@dataclass
class DistributedStrategy:
mp_replicate_size: int = 1
"""Model parallel replicate size, default to 1."""
mp_shard_size: int | None = None
"""Model parallel shard size, default to world_size // mp_replicate_size."""
dp_size: int | None = None
"""Data parallel size, default to world_size // cp_size."""
cp_size: int = 1
"""Context parallel size, default to 1."""
def __post_init__(self) -> None:
if not helper.is_distributed():
self.mp_shard_size = 1
elif self.mp_shard_size is None:
self.mp_shard_size = helper.get_world_size() // self.mp_replicate_size
elif self.mp_replicate_size * self.mp_shard_size != helper.get_world_size():
raise ValueError(
f"mp_replicate_size * mp_shard_size must equal to world_size, "
f"got {self.mp_replicate_size} * {self.mp_shard_size} != {helper.get_world_size()}."
)
if not helper.is_distributed():
self.dp_size = 1
elif self.dp_size is None:
self.dp_size = helper.get_world_size() // self.cp_size
elif self.dp_size * self.cp_size != helper.get_world_size():
raise ValueError(
f"dp_size * cp_size must equal to world_size, "
f"got {self.dp_size} * {self.cp_size} != {helper.get_world_size()}."
)
@property
def model_mesh_shape(self) -> tuple[int, int]:
return (self.mp_replicate_size, self.mp_shard_size)
@property
def model_mesh_dim_names(self) -> tuple[str, str]:
return (Dim.MP_REPLICATE.value, Dim.MP_SHARD.value)
@property
def data_mesh_shape(self) -> tuple[int, int]:
return (self.dp_size, self.cp_size)
@property
def data_mesh_dim_names(self) -> tuple[str, str]:
return (Dim.DP.value, Dim.CP.value)
class DistributedInterface:
_instance: Optional["DistributedInterface"] = None
_initialized: bool = False
def __new__(cls, *args: Any, **kwargs: Any) -> "DistributedInterface":
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
def __init__(self, config: DistributedConfig | None = None) -> None:
if self._initialized:
return
helper.set_device_index()
self._is_distributed = helper.is_distributed()
self._rank = helper.get_rank()
self._world_size = helper.get_world_size()
self._local_rank = helper.get_local_rank()
self._local_world_size = helper.get_local_world_size()
self.current_device = helper.get_current_device()
self.device_count = helper.get_device_count()
if config is None:
self.strategy = DistributedStrategy()
timeout = 18000
else:
self.strategy = DistributedStrategy(
mp_replicate_size=config.get("mp_replicate_size", 1),
mp_shard_size=config.get("mp_shard_size", None),
dp_size=config.get("dp_size", None),
cp_size=config.get("cp_size", 1),
)
timeout = config.get("timeout", 18000)
if self._is_distributed:
init_process_group(timeout=timedelta(seconds=timeout), backend=helper.get_process_group_backend())
self.model_device_mesh = init_device_mesh(
device_type=self.current_device.type,
mesh_shape=self.strategy.model_mesh_shape,
mesh_dim_names=self.strategy.model_mesh_dim_names,
)
self.data_device_mesh = init_device_mesh(
device_type=self.current_device.type,
mesh_shape=self.strategy.data_mesh_shape,
mesh_dim_names=self.strategy.data_mesh_dim_names,
)
else:
self.model_device_mesh = None
self.data_device_mesh = None
self._initialized = True
logger.info_rank0(f"DistributedInterface initialized: {self}.")
def __str__(self) -> str:
return (
f"DistributedInterface(strategy={self.strategy}), is_distributed={self._is_distributed}, "
f"current_device={self.current_device}, rank={self._rank}, world_size={self._world_size}, "
f"model_device_mesh={self.model_device_mesh}, data_device_mesh={self.data_device_mesh}"
)
def get_device_mesh(self, dim: Dim | None = None) -> DeviceMesh | None:
if dim is None:
raise ValueError("dim must be specified.")
elif not self._is_distributed:
return None
elif dim in self.strategy.data_mesh_dim_names:
return self.data_device_mesh[dim.value]
else:
return self.model_device_mesh[dim.value]
def get_group(self, dim: Dim | None = None) -> Optional[ProcessGroup]:
if not self._is_distributed or dim is None:
return None
else:
return self.get_device_mesh(dim).get_group()
def get_rank(self, dim: Dim | None = None) -> int:
if not self._is_distributed:
return 0
elif dim is None:
return self._rank
else:
return self.get_device_mesh(dim).get_local_rank()
def get_world_size(self, dim: Dim | None = None) -> int:
if not self._is_distributed:
return 1
elif dim is None:
return self._world_size
else:
return self.get_device_mesh(dim).size()
def get_local_rank(self) -> int:
return self._local_rank
def get_local_world_size(self) -> int:
return self._local_world_size
def all_gather(self, data: TensorLike, dim: Dim | None = Dim.DP) -> TensorLike:
if self._is_distributed:
return helper.operate_tensorlike(helper.all_gather, data, group=self.get_group(dim))
else:
return data
def all_reduce(
self, data: TensorLike, op: helper.ReduceOp = helper.ReduceOp.MEAN, dim: Dim | None = Dim.DP
) -> TensorLike:
if self._is_distributed:
return helper.operate_tensorlike(helper.all_reduce, data, op=op, group=self.get_group(dim))
else:
return data
def broadcast(self, data: TensorLike, src: int = 0, dim: Dim | None = Dim.DP) -> TensorLike:
if self._is_distributed:
return helper.operate_tensorlike(helper.broadcast, data, src=src, group=self.get_group(dim))
else:
return data
def sync(self) -> None:
if self._is_distributed:
helper.synchronize()
def barrier(self) -> None:
if self._is_distributed:
barrier()
def destroy(self) -> None:
if self._is_distributed:
destroy_process_group()
if __name__ == "__main__":
"""
python -m llamafactory.v1.accelerator.interface
"""
print(DistributedInterface()) | --- +++ @@ -15,6 +15,16 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""A unified interface for model parallelism and data parallelism.
+
+Supports model parallelism types:
+- mp_replicate: Replicate model across multiple devices.
+- mp_shard: Shard model across multiple devices.
+
+And data parallelism types:
+- dp: Data parallelism.
+- cp: Context parallelism.
+"""
from dataclasses import dataclass
from datetime import timedelta
@@ -33,6 +43,7 @@
class Dim(StrEnum):
+ """Dimension names."""
MP_REPLICATE = "mp_replicate"
MP_SHARD = "mp_shard"
@@ -42,6 +53,7 @@
@dataclass
class DistributedStrategy:
+ """Distributed strategy."""
mp_replicate_size: int = 1
"""Model parallel replicate size, default to 1."""
@@ -75,27 +87,33 @@
@property
def model_mesh_shape(self) -> tuple[int, int]:
+ """Model parallel mesh shape."""
return (self.mp_replicate_size, self.mp_shard_size)
@property
def model_mesh_dim_names(self) -> tuple[str, str]:
+ """Model parallel mesh dimension names."""
return (Dim.MP_REPLICATE.value, Dim.MP_SHARD.value)
@property
def data_mesh_shape(self) -> tuple[int, int]:
+ """Data parallel mesh shape."""
return (self.dp_size, self.cp_size)
@property
def data_mesh_dim_names(self) -> tuple[str, str]:
+ """Data parallel mesh dimension names."""
return (Dim.DP.value, Dim.CP.value)
class DistributedInterface:
+ """Distributed interface."""
_instance: Optional["DistributedInterface"] = None
_initialized: bool = False
def __new__(cls, *args: Any, **kwargs: Any) -> "DistributedInterface":
+ """Singleton pattern."""
if cls._instance is None:
cls._instance = super().__new__(cls)
@@ -153,6 +171,7 @@ )
def get_device_mesh(self, dim: Dim | None = None) -> DeviceMesh | None:
+ """Get device mesh for specified dimension."""
if dim is None:
raise ValueError("dim must be specified.")
elif not self._is_distributed:
@@ -163,12 +182,14 @@ return self.model_device_mesh[dim.value]
def get_group(self, dim: Dim | None = None) -> Optional[ProcessGroup]:
+ """Get process group for specified dimension."""
if not self._is_distributed or dim is None:
return None
else:
return self.get_device_mesh(dim).get_group()
def get_rank(self, dim: Dim | None = None) -> int:
+ """Get parallel rank for specified dimension."""
if not self._is_distributed:
return 0
elif dim is None:
@@ -177,6 +198,7 @@ return self.get_device_mesh(dim).get_local_rank()
def get_world_size(self, dim: Dim | None = None) -> int:
+ """Get parallel size for specified dimension."""
if not self._is_distributed:
return 1
elif dim is None:
@@ -185,12 +207,15 @@ return self.get_device_mesh(dim).size()
def get_local_rank(self) -> int:
+ """Get parallel local rank."""
return self._local_rank
def get_local_world_size(self) -> int:
+ """Get parallel local world size."""
return self._local_world_size
def all_gather(self, data: TensorLike, dim: Dim | None = Dim.DP) -> TensorLike:
+ """Gather tensor across specified parallel group."""
if self._is_distributed:
return helper.operate_tensorlike(helper.all_gather, data, group=self.get_group(dim))
else:
@@ -199,26 +224,31 @@ def all_reduce(
self, data: TensorLike, op: helper.ReduceOp = helper.ReduceOp.MEAN, dim: Dim | None = Dim.DP
) -> TensorLike:
+ """Reduce tensor across specified parallel group."""
if self._is_distributed:
return helper.operate_tensorlike(helper.all_reduce, data, op=op, group=self.get_group(dim))
else:
return data
def broadcast(self, data: TensorLike, src: int = 0, dim: Dim | None = Dim.DP) -> TensorLike:
+ """Broadcast tensor across specified parallel group."""
if self._is_distributed:
return helper.operate_tensorlike(helper.broadcast, data, src=src, group=self.get_group(dim))
else:
return data
def sync(self) -> None:
+ """Synchronize all processes."""
if self._is_distributed:
helper.synchronize()
def barrier(self) -> None:
+ """Barrier all processes."""
if self._is_distributed:
barrier()
def destroy(self) -> None:
+ """Destroy all processes."""
if self._is_distributed:
destroy_process_group()
@@ -227,4 +257,4 @@ """
python -m llamafactory.v1.accelerator.interface
"""
- print(DistributedInterface())+ print(DistributedInterface())
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/accelerator/interface.py |
Add concise docstrings to each method | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import Any
from ....accelerator.helper import DeviceType, get_current_accelerator
from ....utils.types import HFModel
class BaseKernel(ABC):
_kernel_id: Any = "" # kernel ID, any hashable value to identify a kernel implementation
_device: DeviceType = DeviceType.CPU # "cuda", "npu", "cpu", etc.
@classmethod
def get_kernel_id(cls) -> str:
return cls._kernel_id
@classmethod
def get_device(cls) -> str:
return cls._device
@classmethod
def check_deps(cls) -> bool:
if cls._device != get_current_accelerator().type:
return False
return True
@classmethod
@abstractmethod
def apply(cls, **kwargs) -> HFModel:
if not cls.check_deps():
raise RuntimeError(f"{cls.__name__} is not available but {cls.__name__} kernel was called.")
raise NotImplementedError | --- +++ @@ -12,6 +12,13 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""The definition of base kernel class.
+
+Init Phase:
+1. Define base kernel class.
+2. Define abstract methods.
+
+"""
from abc import ABC, abstractmethod
from typing import Any
@@ -21,20 +28,36 @@
class BaseKernel(ABC):
+ r"""Base class for all kernel implementations.
+
+ Subclasses must implement the abstract methods and define the required class attributes.
+ """
_kernel_id: Any = "" # kernel ID, any hashable value to identify a kernel implementation
_device: DeviceType = DeviceType.CPU # "cuda", "npu", "cpu", etc.
@classmethod
def get_kernel_id(cls) -> str:
+ """Returns the unique identifier for the kernel."""
return cls._kernel_id
@classmethod
def get_device(cls) -> str:
+ """Returns the device type associated with the kernel (e.g., "cuda", "npu", "cpu")."""
return cls._device
@classmethod
def check_deps(cls) -> bool:
+ """Checks if the required dependencies for the kernel are available.
+
+ Returns:
+ bool: ``True`` if dependencies are met, ``False`` otherwise.
+
+ .. note::
+ In explicit mode, if a user specifies an implementation but this check fails,
+ it should raise an error instead of silently switching.
+ Kernels can override this method to implement custom dependency checks.
+ """
if cls._device != get_current_accelerator().type:
return False
return True
@@ -42,6 +65,23 @@ @classmethod
@abstractmethod
def apply(cls, **kwargs) -> HFModel:
+ """Applies the kernel optimization to the model.
+
+ Args:
+ **kwargs: Arbitrary keyword arguments, usually containing the model instance and the kernel configuration.
+
+ Returns:
+ HFModel: The model with the kernel applied.
+
+ Raises:
+ RuntimeError: If the kernel dependencies are not met.
+ NotImplementedError: If the method is not implemented by the subclass.
+
+ Example:
+ >>> from llamafactory.v1.plugins.model_plugins.kernels.interface import apply_kernel
+ >>> model = HFModel(config=config)
+ >>> model = apply_kernel(model=model, kernel_id="npu_fused_moe")
+ """
if not cls.check_deps():
raise RuntimeError(f"{cls.__name__} is not available but {cls.__name__} kernel was called.")
- raise NotImplementedError+ raise NotImplementedError
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/model_plugins/kernels/base.py |
Fill in missing docstrings in my code | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
import torch
import torch.nn.functional as F
from ..accelerator.helper import ReduceOp
from ..accelerator.interface import Dim, DistributedInterface
from ..config import TrainingArguments
from ..utils import logging
from ..utils.helper import compute_valid_tokens
from ..utils.types import BatchInput, HFModel, ModelOutput, Tensor, TorchDataset
from .utils.batching import BatchGenerator
from .utils.rendering import Renderer
logger = logging.get_logger(__name__)
class BaseTrainer:
def __init__(
self,
args: TrainingArguments,
model: HFModel,
renderer: Renderer,
train_dataset: TorchDataset,
) -> None:
self.args = args
self.model = model
self.renderer = renderer
self.train_dataset = train_dataset
# info
self.global_step = 0
# cached variables
self.device = DistributedInterface().current_device
self.dp_size = DistributedInterface().get_world_size(Dim.DP)
self.model_input_names = self.renderer.processor.model_input_names
self._create_batch_generator()
# Calculate num_training_steps: max_steps takes priority if set
if self.args.max_steps is not None and self.args.max_steps > 0:
self.num_training_steps = self.args.max_steps
else:
self.num_training_steps = self.args.num_train_epochs * len(self.train_batch_generator)
if self.args.enable_activation_checkpointing:
self.model.gradient_checkpointing_enable({"use_reentrant": False})
self._deepspeed_engine = None
dist_name = self.args.dist_config.name if self.args.dist_config is not None else None
if dist_name == "deepspeed":
from ..plugins.trainer_plugins.distributed.hub import DistributedPlugin
self._deepspeed_engine = DistributedPlugin("deepspeed")(
self.model,
self.args.dist_config,
num_micro_batch=self.train_batch_generator.num_micro_batch,
micro_batch_size=self.args.micro_batch_size,
)
self._init_optimizer()
self._init_lr_scheduler()
self.model, self.optimizer, self.lr_scheduler = self._deepspeed_engine.prepare(
self.model, self.optimizer, self.lr_scheduler
)
else:
# fsdp2 / DDP / no dist
self._shard_model()
self._init_optimizer()
self._init_lr_scheduler()
def _create_batch_generator(self) -> None:
self.train_batch_generator = BatchGenerator(
dataset=self.train_dataset,
renderer=self.renderer,
micro_batch_size=self.args.micro_batch_size,
global_batch_size=self.args.global_batch_size,
cutoff_len=self.args.cutoff_len,
batching_workers=self.args.batching_workers,
batching_strategy=self.args.batching_strategy,
seed=self.args.seed,
)
def _shard_model(self) -> None:
if self.args.dist_config is None:
if DistributedInterface().get_world_size(Dim.DP) > 1:
from torch.nn.parallel import DistributedDataParallel as DDP
logger.warning_rank0(
"dist_config is None but distributed training is enabled; falling back to DistributedDataParallel."
)
device_ids = None if self.device.type == "cpu" else [self.device.index]
self.model = DDP(self.model, device_ids=device_ids)
else:
from ..plugins.trainer_plugins.distributed.hub import DistributedPlugin
self.model = DistributedPlugin(self.args.dist_config.name)(
self.model,
self.args.dist_config,
)
def _init_optimizer(self) -> None:
if self.args.optim_config is None:
_trainable_params = [p for p in self.model.parameters() if p.requires_grad]
self.optimizer = torch.optim.AdamW(_trainable_params, lr=self.args.learning_rate)
else:
from ..plugins.trainer_plugins.optimizer import OptimizerPlugin
self.optimizer = OptimizerPlugin(self.args.optim_config.name)(self.model, self.args.optim_config)
def _init_lr_scheduler(self) -> None:
if self.args.lr_scheduler_config is None:
self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda x: 1.0)
else:
from ..plugins.trainer_plugins.lr_scheduler import LRSchedulerPlugin
self.lr_scheduler = LRSchedulerPlugin(self.args.lr_scheduler_config.name)(
self.optimizer, self.num_training_steps, self.args.lr_scheduler_config
)
def compute_log_probs(self, model: HFModel, batch: BatchInput) -> Tensor:
batch_size, _ = batch["labels"].shape
model_inputs = {
k: v.to(self.device, non_blocking=True) for k, v in batch.items() if k in self.model_input_names
}
labels = batch["labels"].to(self.device, non_blocking=True)
outputs: ModelOutput = model(**model_inputs)
logits = outputs.logits.float()
shift_labels = labels[..., 1:].contiguous().view(-1)
shift_logits = logits[..., :-1, :].contiguous().view(shift_labels.size(0), -1)
return -F.cross_entropy(shift_logits, shift_labels, reduction="none").view(batch_size, -1)
@abstractmethod
def compute_loss(self, batch: BatchInput) -> Tensor:
...
def fit(self) -> None:
self.model.train()
for epoch in range(self.args.num_train_epochs):
self.train_batch_generator.set_epoch(epoch)
for micro_batches in self.train_batch_generator:
self.global_step += 1
step_loss = 0
step_valid_tokens = compute_valid_tokens(micro_batches)
step_valid_tokens = DistributedInterface().all_reduce(step_valid_tokens, op=ReduceOp.SUM)
num_micro = len(micro_batches)
for i, micro_batch in enumerate(micro_batches):
loss = self.compute_loss(micro_batch)
mini_step_valid_tokens = compute_valid_tokens([micro_batch])
# fsdp uses mean reduction so we need to scale the loss by dp_size
loss = loss * mini_step_valid_tokens * self.dp_size / (step_valid_tokens + 1e-6)
if self._deepspeed_engine is not None:
# deepspeed: set sync_gradients so engine.step() only fires on last micro-batch
self._deepspeed_engine.accelerator.sync_gradients = i == num_micro - 1
self._deepspeed_engine.backward(loss)
else:
loss.backward()
step_loss += loss.item()
if self._deepspeed_engine is not None:
# deepspeed: engine.step() already ran inside backward at the sync boundary
grad_norm = self._deepspeed_engine.get_grad_norm()
else:
grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm).item()
# isfinite(): argument 'input' (position 1) must be Tensor, not float
if not torch.isfinite(torch.tensor(grad_norm)): # type: ignore # pyright: ignore [reportUnknownReturnType]
logger.warning_rank0(f"Gradient norm is not finite: {grad_norm}")
else:
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
step_loss, grad_norm = DistributedInterface().all_reduce([step_loss, grad_norm])
DistributedInterface().sync()
if DistributedInterface().get_rank() == 0:
print(f"Epoch {epoch}, Step {self.global_step}, Loss: {step_loss:.4f}, Grad Norm: {grad_norm:.4f}")
# Check if max_steps is reached
if self.global_step >= self.num_training_steps:
logger.info_rank0(f"Reached max_steps ({self.num_training_steps}), stopping training.")
return
def save_model(self) -> None:
if self.args.dist_config is not None and self.args.dist_config.name in ("deepspeed", "fsdp2"):
from ..plugins.trainer_plugins.distributed.hub import DistributedPlugin
DistributedPlugin(self.args.dist_config.name).save_model(
self.model, self.args.output_dir, self.renderer.processor
)
else:
model_to_save = self.model.module if hasattr(self.model, "module") else self.model
model_to_save.save_pretrained(self.args.output_dir, max_shard_size="4GB")
self.renderer.processor.save_pretrained(self.args.output_dir, max_shard_size="4GB")
logger.info_rank0(f"Model saved to {self.args.output_dir}") | --- +++ @@ -12,6 +12,20 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""The definition of trainer.
+
+Init Phase:
+
+1. Init batch generator.
+2. Init optimizer (deepspeed).
+3. Shard model.
+4. Init optimizer (fsdp).
+5. Init lr scheduler.
+
+Train Phase:
+1. Train Loop
+
+"""
from abc import abstractmethod
@@ -116,6 +130,7 @@ )
def _init_optimizer(self) -> None:
+ """Init optimizer."""
if self.args.optim_config is None:
_trainable_params = [p for p in self.model.parameters() if p.requires_grad]
self.optimizer = torch.optim.AdamW(_trainable_params, lr=self.args.learning_rate)
@@ -125,6 +140,7 @@ self.optimizer = OptimizerPlugin(self.args.optim_config.name)(self.model, self.args.optim_config)
def _init_lr_scheduler(self) -> None:
+ """Init lr scheduler."""
if self.args.lr_scheduler_config is None:
self.lr_scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda x: 1.0)
else:
@@ -135,6 +151,10 @@ )
def compute_log_probs(self, model: HFModel, batch: BatchInput) -> Tensor:
+ """Compute log probs.
+
+ log_probs: Tensor of shape (batch_size, seq_len - 1)
+ """
batch_size, _ = batch["labels"].shape
model_inputs = {
k: v.to(self.device, non_blocking=True) for k, v in batch.items() if k in self.model_input_names
@@ -148,9 +168,11 @@
@abstractmethod
def compute_loss(self, batch: BatchInput) -> Tensor:
+ """Compute the scalar loss."""
...
def fit(self) -> None:
+ """Train the model."""
self.model.train()
for epoch in range(self.args.num_train_epochs):
self.train_batch_generator.set_epoch(epoch)
@@ -200,6 +222,7 @@ return
def save_model(self) -> None:
+ """Save the model."""
if self.args.dist_config is not None and self.args.dist_config.name in ("deepspeed", "fsdp2"):
from ..plugins.trainer_plugins.distributed.hub import DistributedPlugin
@@ -210,4 +233,4 @@ model_to_save = self.model.module if hasattr(self.model, "module") else self.model
model_to_save.save_pretrained(self.args.output_dir, max_shard_size="4GB")
self.renderer.processor.save_pretrained(self.args.output_dir, max_shard_size="4GB")
- logger.info_rank0(f"Model saved to {self.args.output_dir}")+ logger.info_rank0(f"Model saved to {self.args.output_dir}")
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/core/base_trainer.py |
Add inline docstrings for readability | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import torch
import torch.nn.functional as F
try:
import torch_npu
except ImportError:
pass
from ......accelerator.helper import DeviceType
from ......utils.packages import is_transformers_version_greater_than
from ......utils.types import HFModel
from ...base import BaseKernel
from ...registry import register_kernel
class GmmFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, x, weight, group_list):
ctx.save_for_backward(x, weight)
ctx.group_list = group_list
fwd_output = torch_npu.npu_grouped_matmul(
[x], [weight], bias=None, group_list=group_list, split_item=2, group_type=0, group_list_type=1
)[0]
return fwd_output
@staticmethod
def backward(ctx, grad_output):
input_tensor, weight = ctx.saved_tensors
group_list = ctx.group_list
weight = torch.transpose(weight, 1, 2)
grad_input = torch_npu.npu_grouped_matmul(
[grad_output], [weight], bias=None, group_list=group_list, split_item=2, group_type=0, group_list_type=1
)[0]
grad_weight = torch_npu.npu_grouped_matmul(
[input_tensor.T],
[grad_output],
bias=None,
group_list=group_list,
split_item=3,
group_type=2,
group_list_type=1,
)[0]
return grad_input, grad_weight, None
class HybridGmmFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, num_experts, *args):
x_list = list(args[:num_experts])
weight_list = list(args[num_experts:])
split_sizes = [x.shape[0] for x in x_list]
ctx.split_sizes = split_sizes
ctx.num_experts = num_experts
ctx.save_for_backward(*args)
outputs = torch_npu.npu_grouped_matmul(
x_list, weight_list, bias=None, group_list=None, split_item=0, group_type=-1
)
return tuple(outputs)
@staticmethod
def backward(ctx, *grad_outputs):
saved_tensors = ctx.saved_tensors
num_experts = ctx.num_experts
split_sizes = ctx.split_sizes
x_list = list(saved_tensors[:num_experts])
weight_list = list(saved_tensors[num_experts:])
grad_outputs_contiguous = [g.contiguous() for g in grad_outputs]
w_t_list = [w.t() for w in weight_list]
grad_x_list = torch_npu.npu_grouped_matmul(
grad_outputs_contiguous, # List[Tensor], 每个 [M_i, N]
w_t_list, # List[Tensor], 每个 [N, K] (view)
bias=None,
group_list=None,
split_item=0,
group_type=-1,
)
x_concat = torch.cat(x_list, dim=0)
dy_concat = torch.cat(grad_outputs_contiguous, dim=0) # [Total_M, N]
group_list = torch.tensor(split_sizes, device=x_concat.device, dtype=torch.int64)
grad_w_stack = torch_npu.npu_grouped_matmul(
[x_concat.t()],
[dy_concat],
bias=None,
group_list=group_list,
split_item=3,
group_type=2,
group_list_type=1,
)[0]
if grad_w_stack.dim() == 3:
grad_w_list = list(torch.unbind(grad_w_stack, dim=0))
else:
raise RuntimeError(f"Unexpected grad_w_stack shape: {grad_w_stack.shape}")
return (None, *grad_x_list, *grad_w_list)
class NpuMoeFused:
@staticmethod
def npu_moe_experts_forward(
self, hidden_states: torch.Tensor, routing_weights: torch.Tensor, router_indices: torch.Tensor
) -> torch.Tensor:
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.hidden_size)
permuted_hidden_states, row_ids_map = torch_npu.npu_moe_token_permute(
hidden_states, router_indices.to(torch.int32)
)
tokens_per_expert = torch.histc(router_indices, bins=self.num_experts, min=0, max=self.num_experts)
intermediate_hidden_states = GmmFunction.apply(permuted_hidden_states, self.gate_up_proj, tokens_per_expert)
intermediate_activations = torch_npu.npu_swiglu(intermediate_hidden_states, dim=-1)
output = GmmFunction.apply(intermediate_activations, self.down_proj, tokens_per_expert)
next_states = torch_npu.npu_moe_token_unpermute(output, row_ids_map, probs=routing_weights)
next_states = next_states.view(batch_size, -1, self.hidden_size)
return next_states
@staticmethod
def npu_moe_sparse_block_forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.hidden_size)
router_logits = self.gate(hidden_states)
routing_weights = torch.nn.functional.softmax(router_logits, dim=-1, dtype=torch.float)
routing_weights, router_indices = torch.topk(routing_weights, self.top_k, dim=-1)
routing_weights = routing_weights / routing_weights.sum(dim=-1, keepdim=True)
routing_weights = routing_weights.to(hidden_states.dtype)
hidden_states = hidden_states.reshape(batch_size, -1, self.hidden_size)
routed_out = self.experts(hidden_states, routing_weights, router_indices)
return routed_out
class Qwen3NpuMoeFused:
@staticmethod
def qwen3moe_sparse_moe_block_forward(self, hidden_states: torch.Tensor):
batch_size, sequence_length, hidden_dim = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_dim)
router_logits = self.gate(hidden_states)
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
if self.norm_topk_prob:
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
routing_weights = routing_weights.to(hidden_states.dtype)
permuted_hidden_states, row_ids_map = torch_npu.npu_moe_token_permute(hidden_states, selected_experts.int())
tokens_per_expert = torch.histc(
selected_experts.float(), bins=self.num_experts, min=0, max=self.num_experts
).long()
split_sizes = tokens_per_expert.tolist()
input_list = list(torch.split(permuted_hidden_states, split_sizes, dim=0))
gate_weights = [e.gate_proj.weight.t() for e in self.experts]
up_weights = [e.up_proj.weight.t() for e in self.experts]
down_weights = [e.down_proj.weight.t() for e in self.experts]
gate_out_tuple = HybridGmmFunction.apply(len(input_list), *input_list, *gate_weights)
up_out_tuple = HybridGmmFunction.apply(len(input_list), *input_list, *up_weights)
inter_list = [F.silu(g) * u for g, u in zip(gate_out_tuple, up_out_tuple)]
down_out_tuple = HybridGmmFunction.apply(len(inter_list), *inter_list, *down_weights)
grouped_output = torch.cat(down_out_tuple, dim=0)
next_states = torch_npu.npu_moe_token_unpermute(grouped_output, row_ids_map, probs=routing_weights)
next_states = next_states.view(batch_size, sequence_length, -1)
return next_states, router_logits
# moe patch config mapping
kernel_moe_mapping = {
"Qwen3VLMoeForConditionalGeneration": {
"Qwen3VLMoeTextExperts": NpuMoeFused.npu_moe_experts_forward,
"Qwen3VLMoeTextSparseMoeBlock": NpuMoeFused.npu_moe_sparse_block_forward,
}
}
if not is_transformers_version_greater_than("5.0.0"):
kernel_moe_mapping["Qwen3MoeForCausalLM"] = {
"Qwen3MoeSparseMoeBlock": Qwen3NpuMoeFused.qwen3moe_sparse_moe_block_forward
}
@register_kernel
class NpuFusedMoEKernel(BaseKernel):
_kernel_id = "npu_fused_moe"
_device = DeviceType.NPU
@classmethod
def apply(cls, **kwargs) -> HFModel:
model = kwargs.get("model", None)
if model is None:
raise ValueError(f"HFModel instance is required for {cls.__name__}.")
if not cls.check_deps():
raise RuntimeError("torch_npu is not available but NpuMoEFusedMoEKernel was called.")
archs = getattr(model.config, "architectures", None) or []
target_moe_mapping = None
for arch in archs:
if arch in kernel_moe_mapping:
target_moe_mapping = kernel_moe_mapping[arch]
break
if target_moe_mapping is None:
return model
for module in model.modules():
class_name = module.__class__.__name__
if class_name in target_moe_mapping:
new_forward_func = target_moe_mapping[class_name]
module.forward = types.MethodType(new_forward_func, module)
return model | --- +++ @@ -12,6 +12,14 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""The definition of NPU fused MoE kernels.
+
+Init Phase:
+1. Define GMM functions.
+2. Define NPU fused MoE functions.
+3. Register NPU fused MoE kernel.
+
+"""
import types
@@ -32,9 +40,21 @@
class GmmFunction(torch.autograd.Function):
+ """Custom autograd function for NPU Grouped Matrix Multiplication (GMM)."""
@staticmethod
def forward(ctx, x, weight, group_list):
+ """Performs the forward pass of Grouped Matrix Multiplication.
+
+ Args:
+ ctx: Context object to save tensors for backward pass.
+ x (Tensor): Input tensor.
+ weight (Tensor): Weight tensor.
+ group_list (list): List of group sizes.
+
+ Returns:
+ Tensor: The result of the grouped matrix multiplication.
+ """
ctx.save_for_backward(x, weight)
ctx.group_list = group_list
@@ -45,6 +65,15 @@
@staticmethod
def backward(ctx, grad_output):
+ """Performs the backward pass of Grouped Matrix Multiplication.
+
+ Args:
+ ctx: Context object containing saved tensors.
+ grad_output (Tensor): Gradient with respect to the output.
+
+ Returns:
+ tuple: Gradients with respect to input, weight, and None for group_list.
+ """
input_tensor, weight = ctx.saved_tensors
group_list = ctx.group_list
@@ -65,9 +94,20 @@
class HybridGmmFunction(torch.autograd.Function):
+ """Custom autograd function for Hybrid Grouped Matrix Multiplication on NPU."""
@staticmethod
def forward(ctx, num_experts, *args):
+ """Performs the forward pass of Hybrid GMM.
+
+ Args:
+ ctx: Context object to save tensors.
+ num_experts (int): Number of experts.
+ *args: Variable length argument list containing inputs and weights.
+
+ Returns:
+ tuple: The outputs of the grouped matrix multiplication.
+ """
x_list = list(args[:num_experts])
weight_list = list(args[num_experts:])
@@ -84,6 +124,15 @@
@staticmethod
def backward(ctx, *grad_outputs):
+ """Performs the backward pass of Hybrid GMM.
+
+ Args:
+ ctx: Context object containing saved tensors.
+ *grad_outputs: Gradients with respect to the outputs.
+
+ Returns:
+ tuple: Gradients with respect to inputs and weights.
+ """
saved_tensors = ctx.saved_tensors
num_experts = ctx.num_experts
split_sizes = ctx.split_sizes
@@ -127,11 +176,23 @@
class NpuMoeFused:
+ """Container for NPU fused MoE forward functions."""
@staticmethod
def npu_moe_experts_forward(
self, hidden_states: torch.Tensor, routing_weights: torch.Tensor, router_indices: torch.Tensor
) -> torch.Tensor:
+ """Forward pass for MoE experts using NPU fused operations.
+
+ Args:
+ self: The MoE layer instance.
+ hidden_states (Tensor): Input hidden states.
+ routing_weights (Tensor): Routing weights.
+ router_indices (Tensor): Router indices.
+
+ Returns:
+ Tensor: Output tensor after expert computation.
+ """
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.hidden_size)
permuted_hidden_states, row_ids_map = torch_npu.npu_moe_token_permute(
@@ -147,6 +208,15 @@
@staticmethod
def npu_moe_sparse_block_forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ r"""Forward pass for sparse MoE block using NPU optimization.
+
+ Args:
+ self: The MoE sparse block instance.
+ hidden_states (Tensor): Input hidden states.
+
+ Returns:
+ Tensor: The routed output.
+ """
batch_size = hidden_states.shape[0]
hidden_states = hidden_states.reshape(-1, self.hidden_size)
router_logits = self.gate(hidden_states)
@@ -160,9 +230,19 @@
class Qwen3NpuMoeFused:
+ """Container for Qwen3 NPU fused MoE forward functions."""
@staticmethod
def qwen3moe_sparse_moe_block_forward(self, hidden_states: torch.Tensor):
+ """Forward pass for Qwen3 sparse MoE block using NPU fused operations.
+
+ Args:
+ self: The Qwen3 MoE block instance.
+ hidden_states (Tensor): Input hidden states.
+
+ Returns:
+ tuple: A tuple containing the next states and router logits.
+ """
batch_size, sequence_length, hidden_dim = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_dim)
@@ -218,12 +298,25 @@
@register_kernel
class NpuFusedMoEKernel(BaseKernel):
+ """NPU Fused MoE Kernel implementation."""
_kernel_id = "npu_fused_moe"
_device = DeviceType.NPU
@classmethod
def apply(cls, **kwargs) -> HFModel:
+ """Applies the NPU fused MoE kernel to the model.
+
+ Args:
+ **kwargs: Keyword arguments containing the model.
+
+ Returns:
+ HFModel: The model with patched MoE forward functions.
+
+ Raises:
+ ValueError: If the model is not provided.
+ RuntimeError: If dependencies are not met.
+ """
model = kwargs.get("model", None)
if model is None:
raise ValueError(f"HFModel instance is required for {cls.__name__}.")
@@ -247,4 +340,4 @@ new_forward_func = target_moe_mapping[class_name]
module.forward = types.MethodType(new_forward_func, module)
- return model+ return model
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/model_plugins/kernels/ops/mlp/npu_fused_moe.py |
Add concise docstrings to each method | # Copyright 2025 HuggingFace Inc., THUDM, and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library and the THUDM's ChatGLM implementation.
# https://github.com/huggingface/transformers/blob/v4.40.0/examples/pytorch/summarization/run_summarization.py
# https://github.com/THUDM/ChatGLM-6B/blob/main/ptuning/main.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import TYPE_CHECKING, Optional
import numpy as np
import torch
from transformers.utils import is_nltk_available
from ...extras.constants import IGNORE_INDEX
from ...extras.misc import numpify
from ...extras.packages import is_jieba_available, is_rouge_available
if TYPE_CHECKING:
from transformers import EvalPrediction, PreTrainedTokenizer
if is_jieba_available():
import jieba # type: ignore
if is_nltk_available():
from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu # type: ignore
if is_rouge_available():
from rouge_chinese import Rouge # type: ignore
def eval_logit_processor(logits: "torch.Tensor", labels: "torch.Tensor") -> "torch.Tensor":
if isinstance(logits, (list, tuple)):
if logits[0].dim() == 3: # (batch_size, seq_len, vocab_size)
logits = logits[0]
else: # moe models have aux loss
logits = logits[1]
if logits.dim() != 3:
raise ValueError("Cannot process the logits.")
return torch.argmax(logits, dim=-1)
@dataclass
class ComputeAccuracy:
def _dump(self) -> Optional[dict[str, float]]:
result = None
if hasattr(self, "score_dict"):
result = {k: float(np.mean(v)) for k, v in self.score_dict.items()}
self.score_dict = {"accuracy": []}
return result
def __post_init__(self):
self._dump()
def __call__(self, eval_preds: "EvalPrediction", compute_result: bool = True) -> Optional[dict[str, float]]:
preds, labels = numpify(eval_preds.predictions), numpify(eval_preds.label_ids)
for i in range(len(preds)):
pred, label = preds[i, :-1], labels[i, 1:]
label_mask = label != IGNORE_INDEX
self.score_dict["accuracy"].append(np.mean(pred[label_mask] == label[label_mask]))
if compute_result:
return self._dump()
@dataclass
class ComputeSimilarity:
tokenizer: "PreTrainedTokenizer"
def _dump(self) -> Optional[dict[str, float]]:
result = None
if hasattr(self, "score_dict"):
result = {k: float(np.mean(v)) for k, v in self.score_dict.items()}
self.score_dict = {"rouge-1": [], "rouge-2": [], "rouge-l": [], "bleu-4": []}
return result
def __post_init__(self):
self._dump()
def __call__(self, eval_preds: "EvalPrediction", compute_result: bool = True) -> Optional[dict[str, float]]:
preds, labels = numpify(eval_preds.predictions), numpify(eval_preds.label_ids)
preds = np.where(preds != IGNORE_INDEX, preds, self.tokenizer.pad_token_id)
labels = np.where(labels != IGNORE_INDEX, labels, self.tokenizer.pad_token_id)
decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True)
decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True)
for pred, label in zip(decoded_preds, decoded_labels):
hypothesis = list(jieba.cut(pred))
reference = list(jieba.cut(label))
if len(" ".join(hypothesis).split()) == 0 or len(" ".join(reference).split()) == 0:
result = {"rouge-1": {"f": 0.0}, "rouge-2": {"f": 0.0}, "rouge-l": {"f": 0.0}}
else:
rouge = Rouge()
scores = rouge.get_scores(" ".join(hypothesis), " ".join(reference))
result = scores[0]
for k, v in result.items():
self.score_dict[k].append(round(v["f"] * 100, 4))
bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3)
self.score_dict["bleu-4"].append(round(bleu_score * 100, 4))
if compute_result:
return self._dump() | --- +++ @@ -45,6 +45,7 @@
def eval_logit_processor(logits: "torch.Tensor", labels: "torch.Tensor") -> "torch.Tensor":
+ r"""Compute the token with the largest likelihood to reduce memory footprint."""
if isinstance(logits, (list, tuple)):
if logits[0].dim() == 3: # (batch_size, seq_len, vocab_size)
logits = logits[0]
@@ -59,6 +60,7 @@
@dataclass
class ComputeAccuracy:
+ r"""Compute accuracy and support `batch_eval_metrics`."""
def _dump(self) -> Optional[dict[str, float]]:
result = None
@@ -84,6 +86,10 @@
@dataclass
class ComputeSimilarity:
+ r"""Compute text similarity scores and support `batch_eval_metrics`.
+
+ Wraps the tokenizer into metric functions, used in CustomSeq2SeqTrainer.
+ """
tokenizer: "PreTrainedTokenizer"
@@ -125,4 +131,4 @@ self.score_dict["bleu-4"].append(round(bleu_score * 100, 4))
if compute_result:
- return self._dump()+ return self._dump()
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/train/sft/metric.py |
Write docstrings including parameters and return values | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from ....utils.constants import IGNORE_INDEX
from ....utils.helper import get_tokenizer
from ....utils.types import Message, ModelInput, Processor, ToolCall
from ..rendering import RenderingPlugin
def _update_model_input(
processor: Processor,
input_ids: list[int],
labels: list[int],
loss_weights: list[int],
temp_str: str,
temp_weight: float,
) -> str:
if not temp_str:
return ""
tokenizer = get_tokenizer(processor)
temp_ids = tokenizer.encode(temp_str, add_special_tokens=False)
input_ids.extend(temp_ids)
loss_weights.extend([temp_weight] * len(temp_ids))
if temp_weight > 1e-6:
labels.extend(temp_ids)
else:
labels.extend([IGNORE_INDEX] * len(temp_ids))
return ""
def _concat_text_content(message: Message) -> str:
message_text = ""
for content in message["content"]:
if content["type"] == "text":
message_text += content["value"]
else:
raise ValueError(f"Unsupported content type: {content['type']}")
return message_text
def _get_last_query_index(messages: list[Message]) -> int:
last_query_index = len(messages) - 1
for idx in range(len(messages) - 1, -1, -1):
message = messages[idx]
if message["role"] != "user":
continue
user_text = ""
is_plain_text = True
for content in message["content"]:
if content["type"] != "text":
is_plain_text = False
break
user_text += content["value"]
if not is_plain_text:
continue
if not (user_text.startswith("<tool_response>") and user_text.endswith("</tool_response>")):
last_query_index = idx
break
return last_query_index
def _split_assistant_content(message: Message) -> tuple[str, str, list[ToolCall]]:
text_content = ""
reasoning_content = ""
tool_calls: list[ToolCall] = []
for content in message["content"]:
if content["type"] == "text":
text_content += content["value"]
elif content["type"] == "reasoning":
reasoning_content += content["value"]
elif content["type"] == "tool_call":
try:
tool_call: ToolCall = json.loads(content["value"])
except json.JSONDecodeError:
raise ValueError(f"Invalid tool call format: {content['value']}.")
tool_calls.append(tool_call)
else:
raise ValueError(f"Unsupported content type: {content['type']}")
return text_content, reasoning_content, tool_calls
@RenderingPlugin("qwen3").register("render_messages")
def render_qwen3_messages(
processor: Processor,
messages: list[Message],
tools: str | None = None,
is_generate: bool = False,
enable_thinking: bool = False,
) -> ModelInput:
input_ids, labels, loss_weights = [], [], []
temp_str, temp_weight = "", 0.0
if tools:
temp_str += "<|im_start|>system\n"
if messages[0]["role"] == "system":
temp_str += _concat_text_content(messages[0]) + "\n\n"
temp_weight = messages[0].get("loss_weight", 0.0)
temp_str += (
"# Tools\n\nYou may call one or more functions to assist with the user query.\n\n"
"You are provided with function signatures within <tools></tools> XML tags:\n<tools>"
)
try:
tools = json.loads(tools)
except json.JSONDecodeError:
raise ValueError(f"Invalid tools format: {str(tools)}.")
if not isinstance(tools, list):
tools = [tools]
for tool in tools:
temp_str += "\n" + json.dumps(tool, ensure_ascii=False)
temp_str += (
"\n</tools>\n\nFor each function call, return a json object with function name "
'and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{"name": '
'<function-name>, "arguments": <args-json-object>}\n</tool_call><|im_end|>\n'
)
elif messages[0]["role"] == "system":
temp_str += "<|im_start|>system\n" + _concat_text_content(messages[0]) + "<|im_end|>\n"
temp_weight = messages[0].get("loss_weight", 0.0)
temp_str = _update_model_input(processor, input_ids, labels, loss_weights, temp_str, temp_weight)
last_query_index = _get_last_query_index(messages)
for turn_idx, message in enumerate(messages):
if message["role"] == "user" or (message["role"] == "system" and turn_idx != 0):
temp_str += "<|im_start|>" + message["role"] + "\n" + _concat_text_content(message) + "<|im_end|>\n"
temp_weight = message.get("loss_weight", 0.0)
elif message["role"] == "assistant":
temp_str += "<|im_start|>" + message["role"] + "\n"
text_content, reasoning_content, tool_calls = _split_assistant_content(message)
if turn_idx > last_query_index and (turn_idx == len(messages) - 1 or reasoning_content):
temp_str += "<think>\n" + reasoning_content.strip("\n") + "\n</think>\n\n" + text_content.lstrip("\n")
else:
temp_str += text_content
for tool_call_idx, tool_call in enumerate(tool_calls):
if (tool_call_idx == 0 and text_content) or tool_call_idx > 0:
temp_str += "\n"
arguments = tool_call.get("arguments")
if isinstance(arguments, str):
arguments_str = arguments
else:
arguments_str = json.dumps(arguments, ensure_ascii=False)
temp_str += (
'<tool_call>\n{"name": "'
+ tool_call["name"]
+ '", "arguments": '
+ arguments_str
+ "}\n</tool_call>"
)
temp_str += "<|im_end|>\n"
temp_weight = message.get("loss_weight", 1.0)
elif message["role"] == "tool":
if turn_idx == 0 or messages[turn_idx - 1]["role"] != "tool":
temp_str += "<|im_start|>user"
temp_str += "\n<tool_response>\n" + _concat_text_content(message) + "\n</tool_response>"
if turn_idx == len(messages) - 1 or messages[turn_idx + 1]["role"] != "tool":
temp_str += "<|im_end|>\n"
temp_weight = message.get("loss_weight", 0.0)
temp_str = _update_model_input(processor, input_ids, labels, loss_weights, temp_str, temp_weight)
if is_generate:
temp_str += "<|im_start|>assistant\n"
temp_weight = 0.0
if enable_thinking is False:
temp_str += "<think>\n\n</think>\n\n"
temp_str = _update_model_input(processor, input_ids, labels, loss_weights, temp_str, temp_weight)
attention_mask = [1] * len(input_ids)
return ModelInput(
input_ids=input_ids,
attention_mask=attention_mask,
labels=labels,
loss_weights=loss_weights,
)
@RenderingPlugin("qwen3").register("parse_message")
def parse_qwen3_message(generated_text: str) -> Message:
pattern = re.compile(r"<(think|tool_call)>\s*(.*?)\s*</\1>\s*", re.DOTALL)
content = []
last_end = 0
for match in pattern.finditer(generated_text):
start, end = match.span()
if start > last_end:
text = generated_text[last_end:start].strip()
if text:
content.append({"type": "text", "value": text})
tag_type = match.group(1)
tag_value = match.group(2).strip()
if tag_type == "think":
content.append({"type": "reasoning", "value": tag_value.strip()})
elif tag_type == "tool_call":
try:
json.loads(tag_value.strip())
except json.JSONDecodeError:
raise ValueError(f"Invalid tool call format: {tag_value.strip()}.")
content.append({"type": "tool_call", "value": tag_value.strip()})
last_end = end
if last_end < len(generated_text):
text = generated_text[last_end:].strip()
if text:
content.append({"type": "text", "value": text})
return Message(role="assistant", content=content) | --- +++ @@ -29,6 +29,7 @@ temp_str: str,
temp_weight: float,
) -> str:
+ """Update model input with temporary string."""
if not temp_str:
return ""
@@ -45,6 +46,7 @@
def _concat_text_content(message: Message) -> str:
+ """Concatenate text fields in a message."""
message_text = ""
for content in message["content"]:
if content["type"] == "text":
@@ -56,6 +58,7 @@
def _get_last_query_index(messages: list[Message]) -> int:
+ """Find the last user query index, excluding wrapped tool responses."""
last_query_index = len(messages) - 1
for idx in range(len(messages) - 1, -1, -1):
message = messages[idx]
@@ -81,6 +84,7 @@
def _split_assistant_content(message: Message) -> tuple[str, str, list[ToolCall]]:
+ """Split assistant message into text, reasoning and tool calls."""
text_content = ""
reasoning_content = ""
tool_calls: list[ToolCall] = []
@@ -111,6 +115,10 @@ is_generate: bool = False,
enable_thinking: bool = False,
) -> ModelInput:
+ """Render messages in the Qwen3 template format.
+
+ See https://huggingface.co/spaces/huggingfacejs/chat-template-playground?modelId=Qwen/Qwen3-8B
+ """
input_ids, labels, loss_weights = [], [], []
temp_str, temp_weight = "", 0.0
if tools:
@@ -210,6 +218,14 @@
@RenderingPlugin("qwen3").register("parse_message")
def parse_qwen3_message(generated_text: str) -> Message:
+ """Parse a message in the Qwen3 template format. Supports interleaved reasoning and tool calls.
+
+ Args:
+ generated_text (str): The generated text in the Qwen3 template format.
+
+ Returns:
+ Message: The parsed message.
+ """
pattern = re.compile(r"<(think|tool_call)>\s*(.*?)\s*</\1>\s*", re.DOTALL)
content = []
last_end = 0
@@ -240,4 +256,4 @@ if text:
content.append({"type": "text", "value": text})
- return Message(role="assistant", content=content)+ return Message(role="assistant", content=content)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/model_plugins/templates/qwen3.py |
Add docstrings for better understanding | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import torch
from ......accelerator.helper import DeviceType
from ......utils.logging import get_logger
from ......utils.types import HFModel
from ...base import BaseKernel
from ...registry import register_kernel
logger = get_logger(__name__)
try:
import torch_npu
except ImportError:
pass
def _apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = torch_npu.npu_rotary_mul(q, cos, sin)
k_embed = torch_npu.npu_rotary_mul(k, cos, sin)
return q_embed, k_embed
def _apply_multimodal_rotary_pos_emb_qwen25_vl(q, k, cos, sin, mrope_section, unsqueeze_dim=1):
mrope_section = mrope_section * 2
cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
unsqueeze_dim
)
sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
unsqueeze_dim
)
q_embed = torch_npu.npu_rotary_mul(q, cos, sin)
k_embed = torch_npu.npu_rotary_mul(k, cos, sin)
return q_embed, k_embed
@register_kernel
class NpuRoPEKernel(BaseKernel):
_kernel_id = "npu_fused_rope"
_device = DeviceType.NPU
@classmethod
def apply(cls, **kwargs) -> "HFModel":
if not cls.check_deps():
raise RuntimeError(f"torch_npu is not available but {cls.__name__} was called.")
model = kwargs.get("model", None)
if model is None:
raise ValueError(f"HFModel instance is required for {cls.__name__}.")
_modules = set()
for module in model.modules():
if "Attention" in module.__class__.__name__:
module_name = module.__class__.__module__
if module_name in _modules:
continue
try:
target_module = sys.modules[module_name]
if hasattr(target_module, "apply_rotary_pos_emb"):
if getattr(target_module, "apply_rotary_pos_emb") is not _apply_rotary_pos_emb:
setattr(target_module, "apply_rotary_pos_emb", _apply_rotary_pos_emb)
_modules.add(module_name)
if hasattr(target_module, "apply_multimodal_rotary_pos_emb"):
if (
getattr(target_module, "apply_multimodal_rotary_pos_emb")
is not _apply_multimodal_rotary_pos_emb_qwen25_vl
):
setattr(
target_module,
"apply_multimodal_rotary_pos_emb",
_apply_multimodal_rotary_pos_emb_qwen25_vl,
)
_modules.add(module_name)
except Exception as e:
logger.warning_rank0_once(f"Failed to apply RoPE kernel to module {module_name}: {e}")
return model | --- +++ @@ -12,6 +12,13 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""The definition of NPU fused RoPE kernels.
+
+Init Phase:
+1. Define RoPE forward functions.
+2. Register NPU fused RoPE kernel.
+
+"""
import sys
@@ -33,6 +40,19 @@
def _apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors using NPU optimization.
+
+ Args:
+ q (Tensor): Query tensor.
+ k (Tensor): Key tensor.
+ cos (Tensor): Cosine part of embedding.
+ sin (Tensor): Sine part of embedding.
+ position_ids (Tensor, optional): Position IDs. Default: ``None``.
+ unsqueeze_dim (int): Dimension to unsqueeze cos and sin. Default: 1.
+
+ Returns:
+ tuple: (q_embed, k_embed) The embedded query and key tensors.
+ """
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = torch_npu.npu_rotary_mul(q, cos, sin)
@@ -41,6 +61,19 @@
def _apply_multimodal_rotary_pos_emb_qwen25_vl(q, k, cos, sin, mrope_section, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding with multimodal sections (Qwen2-VL) on NPU.
+
+ Args:
+ q (Tensor): Query tensor.
+ k (Tensor): Key tensor.
+ cos (Tensor): Cosine part of embedding.
+ sin (Tensor): Sine part of embedding.
+ mrope_section (Tensor): Multimodal RoPE section.
+ unsqueeze_dim (int): Dimension to unsqueeze cos and sin. Default: 1.
+
+ Returns:
+ tuple: (q_embed, k_embed) The embedded query and key tensors.
+ """
mrope_section = mrope_section * 2
cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
unsqueeze_dim
@@ -56,12 +89,30 @@
@register_kernel
class NpuRoPEKernel(BaseKernel):
+ """NPU Kernel for Rotary Position Embedding."""
_kernel_id = "npu_fused_rope"
_device = DeviceType.NPU
@classmethod
def apply(cls, **kwargs) -> "HFModel":
+ """Apply RoPE acceleration by monkey-patching `apply_rotary_pos_emb`.
+
+ This function iterates through the model's modules to find attention layers,
+ identifies the module where they are defined, and replaces the original
+ `apply_rotary_pos_emb` function in that module's namespace with the
+ NPU-accelerated version from this file.
+
+ Args:
+ **kwargs: Keyword arguments containing the model.
+
+ Returns:
+ HFModel: The model with patched RoPE functions.
+
+ Raises:
+ RuntimeError: If dependencies are not met.
+ ValueError: If the model is not provided.
+ """
if not cls.check_deps():
raise RuntimeError(f"torch_npu is not available but {cls.__name__} was called.")
@@ -95,4 +146,4 @@ except Exception as e:
logger.warning_rank0_once(f"Failed to apply RoPE kernel to module {module_name}: {e}")
- return model+ return model
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/model_plugins/kernels/ops/rope/npu_rope.py |
Generate consistent docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from collections.abc import Callable
from typing import Any
from . import logging
logger = logging.get_logger(__name__)
class BasePlugin:
_registry: dict[str, dict[str, Callable]] = defaultdict(dict)
def __init__(self, name: str | None = None) -> None:
self.name = name
def register(self, method_name: str = "__call__") -> Callable:
if self.name is None:
raise ValueError("Plugin name should be specified.")
if method_name in self._registry[self.name]:
logger.warning_rank0_once(f"Method {method_name} of plugin {self.name} is already registered.")
def decorator(func: Callable) -> Callable:
self._registry[self.name][method_name] = func
return func
return decorator
def __call__(self, *args, **kwargs) -> Any:
return self["__call__"](*args, **kwargs)
def __getattr__(self, method_name: str) -> Callable:
return self[method_name]
def __getitem__(self, method_name: str) -> Callable:
if method_name not in self._registry[self.name]:
raise ValueError(f"Method {method_name} of plugin {self.name} is not registered.")
return self._registry[self.name][method_name]
if __name__ == "__main__":
"""
python -m llamafactory.v1.utils.plugin
"""
class PrintPlugin(BasePlugin):
def again(self): # optional
self["again"]()
@PrintPlugin("hello").register()
def print_hello():
print("Hello world!")
@PrintPlugin("hello").register("again")
def print_hello_again():
print("Hello world! Again.")
PrintPlugin("hello")()
PrintPlugin("hello").again() | --- +++ @@ -24,13 +24,40 @@
class BasePlugin:
+ """Base class for plugins.
+
+ A plugin is a callable object that can be registered and called by name.
+
+ Example usage:
+ ```python
+ class PrintPlugin(BasePlugin):
+ def again(self): # optional
+ self["again"]()
+
+
+ @PrintPlugin("hello").register()
+ def print_hello():
+ print("Hello world!")
+
+
+ @PrintPlugin("hello").register("again")
+ def print_hello_again():
+ print("Hello world! Again.")
+
+
+ PrintPlugin("hello")()
+ PrintPlugin("hello").again()
+ ```
+ """
_registry: dict[str, dict[str, Callable]] = defaultdict(dict)
def __init__(self, name: str | None = None) -> None:
+ """Initialize the plugin with a name."""
self.name = name
def register(self, method_name: str = "__call__") -> Callable:
+ """Decorator to register a function as a plugin."""
if self.name is None:
raise ValueError("Plugin name should be specified.")
@@ -44,12 +71,15 @@ return decorator
def __call__(self, *args, **kwargs) -> Any:
+ """Call the registered function with the given arguments."""
return self["__call__"](*args, **kwargs)
def __getattr__(self, method_name: str) -> Callable:
+ """Get the registered function with the given name."""
return self[method_name]
def __getitem__(self, method_name: str) -> Callable:
+ """Get the registered function with the given name."""
if method_name not in self._registry[self.name]:
raise ValueError(f"Method {method_name} of plugin {self.name} is not registered.")
@@ -74,4 +104,4 @@ print("Hello world! Again.")
PrintPlugin("hello")()
- PrintPlugin("hello").again()+ PrintPlugin("hello").again()
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/utils/plugin.py |
Add verbose docstrings with examples | # Copyright 2025 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/trainer.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from types import MethodType
from typing import TYPE_CHECKING, Optional, Union
import torch
from transformers import Trainer
from typing_extensions import override
from ...extras import logging
from ...extras.packages import is_transformers_version_greater_than
from ..callbacks import FixValueHeadModelCallback, SaveProcessorCallback
from ..trainer_utils import create_custom_optimizer, create_custom_scheduler
if TYPE_CHECKING:
from transformers import PreTrainedModel, ProcessorMixin
from transformers.trainer import PredictionOutput
from ...hparams import FinetuningArguments
logger = logging.get_logger(__name__)
class PairwiseTrainer(Trainer):
def __init__(
self, finetuning_args: "FinetuningArguments", processor: Optional["ProcessorMixin"], **kwargs
) -> None:
if is_transformers_version_greater_than("4.46"):
kwargs["processing_class"] = kwargs.pop("tokenizer")
super().__init__(**kwargs)
self.model_accepts_loss_kwargs = False # overwrite trainer's default behavior
self.finetuning_args = finetuning_args
self.can_return_loss = True # override property to return eval_loss
self.add_callback(FixValueHeadModelCallback)
if processor is not None:
self.add_callback(SaveProcessorCallback(processor))
if finetuning_args.use_badam:
from badam import BAdamCallback, clip_grad_norm_old_version # type: ignore
self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
self.add_callback(BAdamCallback)
@override
def create_optimizer(self) -> "torch.optim.Optimizer":
if self.optimizer is None:
self.optimizer = create_custom_optimizer(self.model, self.args, self.finetuning_args)
return super().create_optimizer()
@override
def create_scheduler(
self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None
) -> "torch.optim.lr_scheduler.LRScheduler":
create_custom_scheduler(self.args, num_training_steps, optimizer)
return super().create_scheduler(num_training_steps, optimizer)
@override
def _get_train_sampler(self, *args, **kwargs) -> Optional["torch.utils.data.Sampler"]:
if self.finetuning_args.disable_shuffling:
return torch.utils.data.SequentialSampler(self.train_dataset)
return super()._get_train_sampler(*args, **kwargs)
@override
def compute_loss(
self, model: "PreTrainedModel", inputs: dict[str, "torch.Tensor"], return_outputs: bool = False, **kwargs
) -> Union["torch.Tensor", tuple["torch.Tensor", list["torch.Tensor"]]]:
_, _, values = model(**inputs, output_hidden_states=True, return_dict=True, use_cache=False)
batch_size = inputs["input_ids"].size(0) // 2
chosen_masks, rejected_masks = torch.split(inputs["attention_mask"], batch_size, dim=0)
chosen_rewards, rejected_rewards = torch.split(values, batch_size, dim=0)
chosen_scores = chosen_rewards.gather(dim=-1, index=(chosen_masks.sum(dim=-1, keepdim=True) - 1))
rejected_scores = rejected_rewards.gather(dim=-1, index=(rejected_masks.sum(dim=-1, keepdim=True) - 1))
chosen_scores, rejected_scores = chosen_scores.squeeze(), rejected_scores.squeeze()
loss = -torch.nn.functional.logsigmoid(chosen_scores.float() - rejected_scores.float()).mean()
if return_outputs:
return loss, (loss, chosen_scores, rejected_scores)
else:
return loss
@override
def _save(self, output_dir: Optional[str] = None, state_dict=None):
if state_dict is None:
state_dict = self.model.state_dict()
if getattr(self.args, "save_safetensors", True):
from collections import defaultdict
ptrs = defaultdict(list)
for name, tensor in state_dict.items():
if isinstance(tensor, torch.Tensor):
ptrs[id(tensor)].append(name)
for names in ptrs.values():
if len(names) > 1:
names.sort()
for name in names[1:]:
state_dict.pop(name, None)
super()._save(output_dir, state_dict)
def save_predictions(self, predict_results: "PredictionOutput") -> None:
if not self.is_world_process_zero():
return
output_prediction_file = os.path.join(self.args.output_dir, "generated_predictions.jsonl")
logger.info_rank0(f"Saving prediction results to {output_prediction_file}")
chosen_scores, rejected_scores = predict_results.predictions
with open(output_prediction_file, "w", encoding="utf-8") as writer:
res: list[str] = []
for c_score, r_score in zip(chosen_scores, rejected_scores):
res.append(json.dumps({"chosen": round(float(c_score), 2), "rejected": round(float(r_score), 2)}))
writer.write("\n".join(res)) | --- +++ @@ -41,6 +41,7 @@
class PairwiseTrainer(Trainer):
+ r"""Inherits Trainer to compute pairwise loss."""
def __init__(
self, finetuning_args: "FinetuningArguments", processor: Optional["ProcessorMixin"], **kwargs
@@ -87,6 +88,13 @@ def compute_loss(
self, model: "PreTrainedModel", inputs: dict[str, "torch.Tensor"], return_outputs: bool = False, **kwargs
) -> Union["torch.Tensor", tuple["torch.Tensor", list["torch.Tensor"]]]:
+ r"""Compute pairwise loss. The first n examples are chosen and the last n examples are rejected.
+
+ Subclass and override to inject custom behavior.
+
+ Note that the first element will be removed from the output tuple.
+ See: https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/trainer.py#L3842
+ """
_, _, values = model(**inputs, output_hidden_states=True, return_dict=True, use_cache=False)
batch_size = inputs["input_ids"].size(0) // 2
chosen_masks, rejected_masks = torch.split(inputs["attention_mask"], batch_size, dim=0)
@@ -123,6 +131,10 @@ super()._save(output_dir, state_dict)
def save_predictions(self, predict_results: "PredictionOutput") -> None:
+ r"""Save model predictions to `output_dir`.
+
+ A custom behavior that not contained in Seq2SeqTrainer.
+ """
if not self.is_world_process_zero():
return
@@ -135,4 +147,4 @@ for c_score, r_score in zip(chosen_scores, rejected_scores):
res.append(json.dumps({"chosen": round(float(c_score), 2), "rejected": round(float(r_score), 2)}))
- writer.write("\n".join(res))+ writer.write("\n".join(res))
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/train/rm/trainer.py |
Write docstrings describing functionality | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...utils.objects import StatefulBuffer
from ...utils.plugin import BasePlugin
from ...utils.types import BatchInfo, BatchInput, DataLoader
class BatchingPlugin(BasePlugin):
def compute_length(self, data_provider: DataLoader) -> int:
raise NotImplementedError()
def fill_buffer(self, buffer: StatefulBuffer, batch_info: BatchInfo) -> None:
raise NotImplementedError()
def generate_batch(self, buffer: StatefulBuffer, batch_info: BatchInfo) -> list[BatchInput] | None:
raise NotImplementedError() | --- +++ @@ -19,10 +19,16 @@
class BatchingPlugin(BasePlugin):
def compute_length(self, data_provider: DataLoader) -> int:
+ """Compute the length of the batch generator.
+
+ The approximate length is used to calculate the lr schedule.
+ """
raise NotImplementedError()
def fill_buffer(self, buffer: StatefulBuffer, batch_info: BatchInfo) -> None:
+ """Fill the buffer with data."""
raise NotImplementedError()
def generate_batch(self, buffer: StatefulBuffer, batch_info: BatchInfo) -> list[BatchInput] | None:
- raise NotImplementedError()+ """Generate a batch from the buffer."""
+ raise NotImplementedError()
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/trainer_plugins/batching.py |
Document this module using docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
import torch
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin
from ....utils.logging import get_logger
from ....utils.types import HFModel, Processor
logger = get_logger(__name__)
class DeepSpeedEngine:
def __init__(self, dist_config: dict[str, Any], num_micro_batch: int = 1, micro_batch_size: int = 1):
config_file = dist_config.get("config_file")
if not config_file:
raise ValueError("DeepSpeed config_file is required in dist_config")
ds_plugin = DeepSpeedPlugin(hf_ds_config=config_file)
self.accelerator = Accelerator(
deepspeed_plugin=ds_plugin,
gradient_accumulation_steps=num_micro_batch,
)
# Resolve "auto" for train_micro_batch_size_per_gpu so that
# accelerate.prepare() does not require a DataLoader to infer it.
ds_config = self.accelerator.state.deepspeed_plugin.deepspeed_config
if ds_config.get("train_micro_batch_size_per_gpu") in (None, "auto"):
ds_config["train_micro_batch_size_per_gpu"] = micro_batch_size
logger.info_rank0(f"DeepSpeedEngine initialized with config: {config_file}")
def shard_model(self, model: HFModel) -> "DeepSpeedEngine":
return self
def prepare(
self,
model: HFModel,
optimizer: torch.optim.Optimizer,
lr_scheduler: Optional[Any] = None,
) -> tuple[HFModel, torch.optim.Optimizer, Any]:
if lr_scheduler is not None:
model, optimizer, lr_scheduler = self.accelerator.prepare(model, optimizer, lr_scheduler)
else:
model, optimizer = self.accelerator.prepare(model, optimizer)
model._accelerator = self.accelerator # type: ignore[assignment]
logger.info_rank0("Model, optimizer, and lr_scheduler prepared via accelerate")
return model, optimizer, lr_scheduler
def backward(self, loss: torch.Tensor) -> None:
self.accelerator.backward(loss)
def get_grad_norm(self) -> float:
engine_wrapper = getattr(self.accelerator, "deepspeed_engine_wrapped", None)
if engine_wrapper is not None:
return engine_wrapper.engine.get_global_grad_norm() or 0.0
return 0.0
def save_model(model: HFModel, output_dir: str, processor: Processor) -> None:
accelerator: Accelerator = model._accelerator # type: ignore[union-attr]
unwrapped_model = accelerator.unwrap_model(model)
state_dict = accelerator.get_state_dict(model)
if accelerator.is_main_process:
unwrapped_model.save_pretrained(output_dir, state_dict=state_dict, max_shard_size="4GB")
processor.save_pretrained(output_dir, max_shard_size="4GB")
accelerator.wait_for_everyone()
logger.info_rank0(f"Model saved to {output_dir}") | --- +++ @@ -12,6 +12,12 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""DeepSpeed integration via accelerate's built-in capabilities.
+
+Instead of manually calling deepspeed.initialize() and syncing config,
+this module leverages accelerate's Accelerator + DeepSpeedPlugin to handle
+initialization, backward, gradient accumulation, and model saving.
+"""
from typing import Any, Optional
@@ -27,6 +33,16 @@
class DeepSpeedEngine:
+ """DeepSpeed integration using accelerate's built-in capabilities.
+
+ This replaces the manual DeepSpeedConfigHelper / DeepSpeedEngine approach
+ with accelerate's Accelerator + DeepSpeedPlugin, which handles:
+ - Config syncing (auto values, batch size, lr, etc.)
+ - deepspeed.initialize() call
+ - Optimizer / LR scheduler wrapping
+ - Backward + gradient accumulation boundary
+ - ZeRO-3 parameter gathering for saving
+ """
def __init__(self, dist_config: dict[str, Any], num_micro_batch: int = 1, micro_batch_size: int = 1):
config_file = dist_config.get("config_file")
@@ -49,6 +65,10 @@ logger.info_rank0(f"DeepSpeedEngine initialized with config: {config_file}")
def shard_model(self, model: HFModel) -> "DeepSpeedEngine":
+ """No-op shard — actual model wrapping happens in prepare().
+
+ Returns self so the caller gets the engine instance via the hub interface.
+ """
return self
def prepare(
@@ -57,6 +77,10 @@ optimizer: torch.optim.Optimizer,
lr_scheduler: Optional[Any] = None,
) -> tuple[HFModel, torch.optim.Optimizer, Any]:
+ """Prepare model, optimizer, and lr_scheduler using accelerate.
+
+ Internally calls deepspeed.initialize() and wraps the returned objects.
+ """
if lr_scheduler is not None:
model, optimizer, lr_scheduler = self.accelerator.prepare(model, optimizer, lr_scheduler)
else:
@@ -68,9 +92,17 @@ return model, optimizer, lr_scheduler
def backward(self, loss: torch.Tensor) -> None:
+ """Backward pass using accelerate.
+
+ Delegates to DeepSpeedEngineWrapper.backward() which respects
+ sync_gradients to control gradient accumulation boundaries.
+ When sync_gradients=True: engine.backward(loss) + engine.step()
+ When sync_gradients=False: engine.backward(loss) only
+ """
self.accelerator.backward(loss)
def get_grad_norm(self) -> float:
+ """Get the global gradient norm from the DeepSpeed engine."""
engine_wrapper = getattr(self.accelerator, "deepspeed_engine_wrapped", None)
if engine_wrapper is not None:
return engine_wrapper.engine.get_global_grad_norm() or 0.0
@@ -78,6 +110,12 @@
def save_model(model: HFModel, output_dir: str, processor: Processor) -> None:
+ """Save model using accelerate's built-in ZeRO-aware utilities.
+
+ Expects model._accelerator to be set during prepare().
+ Handles ZeRO-3 parameter gathering automatically via
+ accelerator.get_state_dict().
+ """
accelerator: Accelerator = model._accelerator # type: ignore[union-attr]
unwrapped_model = accelerator.unwrap_model(model)
@@ -88,4 +126,4 @@ processor.save_pretrained(output_dir, max_shard_size="4GB")
accelerator.wait_for_everyone()
- logger.info_rank0(f"Model saved to {output_dir}")+ logger.info_rank0(f"Model saved to {output_dir}")
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/trainer_plugins/distributed/deepspeed.py |
Write reusable docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import types
from ......accelerator.helper import DeviceType
from ......utils.types import HFModel
from ...base import BaseKernel
from ...registry import register_kernel
def npu_rms_norm_forward(self, hidden_states):
import torch_npu
return torch_npu.npu_rms_norm(hidden_states, self.weight, epsilon=self.variance_epsilon)[0]
@register_kernel
class NpuRMSNormKernel(BaseKernel):
_kernel_id = "npu_fused_rmsnorm"
_device = DeviceType.NPU
@classmethod
def apply(cls, **kwargs) -> "HFModel":
model = kwargs.get("model")
if model is None:
raise ValueError(f"HFModel instance is required for {cls.__name__}.")
if not cls.check_deps():
raise RuntimeError(f"torch_npu is not available but {cls.__name__} was called.")
rms_norm_pattern = re.compile("RMSNorm", re.IGNORECASE)
for name, module in model.named_modules():
# Match any module whose class name contains "RMSNorm"
if re.search(rms_norm_pattern, module.__class__.__name__):
# Bind function as an instance method to preserve `self` semantics
# and replace the original forward
module.forward = types.MethodType(npu_rms_norm_forward, module)
return model | --- +++ @@ -12,6 +12,13 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""The definition of NPU fused RMSNorm kernels.
+
+Init Phase:
+1. Define RMSNorm forward function.
+2. Register NPU fused RMSNorm kernel.
+
+"""
import re
import types
@@ -23,6 +30,15 @@
def npu_rms_norm_forward(self, hidden_states):
+ """NPU forward implementation for RMSNorm.
+
+ Args:
+ self: RMSNorm module instance with `weight` and `variance_epsilon`.
+ hidden_states (Tensor): Input hidden states tensor, same shape as the baseline.
+
+ Returns:
+ Tensor: Normalized tensor consistent with the baseline RMSNorm behavior.
+ """
import torch_npu
return torch_npu.npu_rms_norm(hidden_states, self.weight, epsilon=self.variance_epsilon)[0]
@@ -30,12 +46,32 @@
@register_kernel
class NpuRMSNormKernel(BaseKernel):
+ """NPU kernel wrapper for RMSNorm that applies the replacement within a model."""
_kernel_id = "npu_fused_rmsnorm"
_device = DeviceType.NPU
@classmethod
def apply(cls, **kwargs) -> "HFModel":
+ """Iterate the model and apply NPU-optimized forward to matched RMSNorm modules.
+
+ Key points:
+ - Match modules whose class name contains "RMSNorm" (case-insensitive).
+ - Bind `_npu_rms_forward` as an instance method via `types.MethodType` to
+ replace the original `forward`.
+ - Do not modify weights, hyperparameters, or module structure to ensure
+ numerical behavior and interface consistency.
+
+ Args:
+ **kwargs: Keyword arguments containing the model.
+
+ Returns:
+ HFModel: The model with NPU fused RMSNorm.
+
+ Raises:
+ RuntimeError: If torch_npu is not available.
+ ValueError: If the model is not provided.
+ """
model = kwargs.get("model")
if model is None:
raise ValueError(f"HFModel instance is required for {cls.__name__}.")
@@ -52,4 +88,4 @@ # and replace the original forward
module.forward = types.MethodType(npu_rms_norm_forward, module)
- return model+ return model
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/model_plugins/kernels/ops/rms_norm/npu_rms_norm.py |
Write clean docstrings for readability | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import gc
import os
import torch
import torch.nn as nn
from peft.tuners.lora import LoraLayer
from torch.distributed.checkpoint.state_dict import StateDictOptions, get_model_state_dict, set_model_state_dict
from torch.distributed.fsdp import (
CPUOffloadPolicy,
MixedPrecisionPolicy,
fully_shard,
)
from ....accelerator.helper import get_current_accelerator
from ....accelerator.interface import DistributedInterface
from ....utils.logging import get_logger
from ....utils.types import HFModel, Processor
logger = get_logger(__name__)
def get_transformer_layer_cls(model: HFModel) -> type[nn.Module] | None:
no_split_modules = getattr(model, "_no_split_modules", None)
if no_split_modules:
if isinstance(no_split_modules, (list, tuple)):
for name, module in model.named_modules():
for cls_name in no_split_modules:
if module.__class__.__name__ == cls_name:
return module.__class__
if hasattr(model, "model") and hasattr(model.model, "layers"):
return type(model.model.layers[0])
if hasattr(model, "layers"):
return type(model.layers[0])
return None
def save_model(model: HFModel, output_dir: str, processor: Processor) -> None:
if DistributedInterface().get_rank() == 0:
logger.info("Gathering state dict for saving...")
options = StateDictOptions(full_state_dict=True, cpu_offload=True)
state_dict = get_model_state_dict(model, options=options)
if DistributedInterface().get_rank() == 0:
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir, state_dict=state_dict, max_shard_size="4GB")
processor.save_pretrained(output_dir, max_shard_size="4GB")
logger.info(f"Model saved to {output_dir}")
class FSDP2Engine:
def __init__(self, dist_config: dict):
self.dist_interface = DistributedInterface()
self.rank = self.dist_interface.get_rank()
self.local_rank = self.dist_interface.get_local_rank()
self.world_size = self.dist_interface.get_world_size()
self.mixed_precision = dist_config.get("mixed_precision", "bf16")
self.reshard_after_forward = dist_config.get("reshard_after_forward", True)
self.offload_params = dist_config.get("offload_params", False)
self.pin_memory = dist_config.get("pin_memory", True)
self.dcp_path = dist_config.get("dcp_path", None)
self.device_mesh = self.dist_interface.data_device_mesh
if self.device_mesh is None:
logger.warning(
"Device Mesh not found in DistributedInterface. FSDP2 might fail if not running in distributed mode."
)
if self.device_mesh is not None:
try:
self.fsdp_mesh = self.device_mesh["dp"]
except Exception:
self.fsdp_mesh = self.device_mesh
logger.info(f"Using Device Mesh: {self.fsdp_mesh}")
else:
self.fsdp_mesh = None
def get_mp_policy(self) -> MixedPrecisionPolicy:
if self.mixed_precision == "bf16":
param_dtype = torch.bfloat16
reduce_dtype = torch.float32
elif self.mixed_precision == "fp16":
param_dtype = torch.float16
reduce_dtype = torch.float32
else:
param_dtype = torch.float32
reduce_dtype = torch.float32
return MixedPrecisionPolicy(
param_dtype=param_dtype,
reduce_dtype=reduce_dtype,
cast_forward_inputs=True,
)
def is_lora_module_wrap(self, model) -> bool:
return any(isinstance(module, LoraLayer) for module in model.modules())
def prepare_model(self, model: HFModel) -> HFModel:
if self.fsdp_mesh is None:
logger.warning("No FSDP Mesh available, skipping FSDP wrapping.")
return model
mp_policy = self.get_mp_policy()
layer_cls = get_transformer_layer_cls(model)
if layer_cls is None:
logger.warning(
"Could not identify Transformer Layer class, applying FSDP to the whole model structure only."
)
transformer_layer_cls_to_wrap = set()
else:
logger.info(f"Applying per-layer FSDP to {layer_cls.__name__}")
transformer_layer_cls_to_wrap = {layer_cls}
if self.is_lora_module_wrap(model):
lora_modules = []
for module in model.modules():
if len(list(module.children())) != 0:
continue
if any(param.requires_grad for param in module.parameters(recurse=False)):
lora_modules.append(module)
for module in lora_modules:
fully_shard(
module,
mesh=self.fsdp_mesh,
reshard_after_forward=self.reshard_after_forward,
mp_policy=mp_policy,
offload_policy=CPUOffloadPolicy(pin_memory=self.pin_memory) if self.offload_params else None,
)
logger.info("Applying FSDP wrap for LoRA layer separately.")
for name, module in model.named_modules():
should_wrap = False
if type(module) in transformer_layer_cls_to_wrap:
should_wrap = True
elif isinstance(module, nn.Embedding):
if not getattr(model.config, "tie_word_embeddings", True):
should_wrap = True
if should_wrap:
fully_shard(
module,
mesh=self.fsdp_mesh,
reshard_after_forward=self.reshard_after_forward,
mp_policy=mp_policy,
offload_policy=CPUOffloadPolicy(pin_memory=self.pin_memory) if self.offload_params else None,
)
# BaseTrainer is the single source of truth for gradient checkpointing.
# FSDP2 only applies the input-grad compatibility hook when checkpointing is already enabled.
if getattr(model, "is_gradient_checkpointing", False):
if self.rank == 0:
logger.info("Gradient checkpointing is enabled. Applying FSDP2 input grad preparation.")
if hasattr(model, "enable_input_require_grads"):
model.enable_input_require_grads()
else:
def make_inputs_require_grad(module, input, output):
output.requires_grad_(True)
model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
fully_shard(
model,
mesh=self.fsdp_mesh,
reshard_after_forward=self.reshard_after_forward,
mp_policy=mp_policy,
offload_policy=CPUOffloadPolicy(pin_memory=self.pin_memory) if self.offload_params else None,
)
return model
@torch.no_grad()
def materialize_and_load(self, model: HFModel, hf_model_path: str, dcp_path: str = None):
if self.rank == 0:
logger.info("Materializing sharded model params...")
device = get_current_accelerator()
model.to_empty(device=device)
if dcp_path and os.path.exists(dcp_path):
if self.rank == 0:
logger.info(f"DCP path found at {dcp_path}. Using efficient Sharded Loading (DCP Load).")
self._load_from_dcp(model, dcp_path)
else:
if self.rank == 0:
if dcp_path:
logger.warning(f"DCP path {dcp_path} not found.")
logger.info("Using HF Meta Loading (Chunk Load).")
self._load_weights_from_hf_checkpoint(model, hf_model_path)
return model
def _save_non_persistent_buffers(self, model: HFModel) -> dict:
saved = {}
for mod_name, module in model.named_modules():
for buf_name in module._non_persistent_buffers_set:
fqn = f"{mod_name}.{buf_name}" if mod_name else buf_name
buf = getattr(module, buf_name, None)
if buf is not None:
saved[fqn] = copy.deepcopy(buf)
if self.rank == 0 and saved:
logger.info(f"Saved {len(saved)} non-persistent buffers")
return saved
def _restore_non_persistent_buffers(self, model: HFModel, saved_buffers: dict):
if not saved_buffers:
return
device = get_current_accelerator()
for fqn, buf in saved_buffers.items():
buf = buf.to(device)
if "." in fqn:
parent_fqn, buf_name = fqn.rsplit(".", 1)
parent_module = model.get_submodule(parent_fqn)
else:
buf_name = fqn
parent_module = model
parent_module.register_buffer(buf_name, buf, persistent=False)
if self.rank == 0:
logger.info(f"Restored {len(saved_buffers)} non-persistent buffers")
def shard_model(self, model: HFModel) -> HFModel:
if model.device.type == "meta":
non_persistent_buffers = self._save_non_persistent_buffers(model)
if getattr(model.config, "tie_word_embeddings", None):
model.tie_weights()
model = self.prepare_model(model)
model = self.materialize_and_load(model, hf_model_path=model.config.name_or_path, dcp_path=self.dcp_path)
# fix tied broken for no-fsdp-wrap case
if getattr(model.config, "tie_word_embeddings", None):
model.tie_weights()
self._restore_non_persistent_buffers(model, non_persistent_buffers)
else:
model = self.prepare_model(model)
return model
def _load_from_dcp(self, model: HFModel, dcp_path: str):
import torch.distributed.checkpoint as dcp
try:
if self.rank == 0:
logger.info(f"Loading distributed checkpoint from {dcp_path} ...")
options = StateDictOptions(full_state_dict=False, cpu_offload=True)
local_state_dict = get_model_state_dict(model, options=options)
dcp.load(state_dict=local_state_dict, checkpoint_id=dcp_path)
set_model_state_dict(model, local_state_dict, options=options)
if self.rank == 0:
logger.info("DCP weights loaded successfully.")
except Exception as e:
logger.error(f"Failed to load from DCP: {e}")
raise e
def _load_weights_from_hf_checkpoint(self, model: HFModel, hf_model_path: str):
import glob
import json
hf_model_path = self._resolve_hf_checkpoint_dir(hf_model_path)
if self.rank == 0:
logger.info(f"Loading weights from {hf_model_path} ...")
index_file = os.path.join(hf_model_path, "model.safetensors.index.json")
is_safetensors = True
checkpoint_files = []
if os.path.exists(index_file):
with open(index_file) as f:
index = json.load(f)
checkpoint_files = sorted(set(index["weight_map"].values()))
checkpoint_files = [os.path.join(hf_model_path, f) for f in checkpoint_files]
elif os.path.exists(os.path.join(hf_model_path, "model.safetensors")):
checkpoint_files = [os.path.join(hf_model_path, "model.safetensors")]
else:
is_safetensors = False
index_file = os.path.join(hf_model_path, "pytorch_model.bin.index.json")
if os.path.exists(index_file):
with open(index_file) as f:
index = json.load(f)
checkpoint_files = sorted(set(index["weight_map"].values()))
checkpoint_files = [os.path.join(hf_model_path, f) for f in checkpoint_files]
elif os.path.exists(os.path.join(hf_model_path, "pytorch_model.bin")):
checkpoint_files = [os.path.join(hf_model_path, "pytorch_model.bin")]
else:
checkpoint_files = sorted(glob.glob(os.path.join(hf_model_path, "*.safetensors")))
if checkpoint_files:
is_safetensors = True
else:
checkpoint_files = sorted(glob.glob(os.path.join(hf_model_path, "*.bin")))
if not checkpoint_files:
raise ValueError(f"No checkpoint files found in {hf_model_path}")
param_map = dict(model.named_parameters())
total_files = len(checkpoint_files)
for i, ckpt_file in enumerate(checkpoint_files):
if self.rank == 0:
logger.info(f"[{i + 1}/{total_files}] Loading {os.path.basename(ckpt_file)} ...")
if is_safetensors:
from safetensors import safe_open
with safe_open(ckpt_file, framework="pt", device="cpu") as f:
for key in f.keys():
if key in param_map:
tensor = f.get_tensor(key)
self._copy_weights(param_map[key], tensor)
else:
state_dict = torch.load(ckpt_file, map_location="cpu")
for key, tensor in state_dict.items():
if key in param_map:
self._copy_weights(param_map[key], tensor)
del state_dict
gc.collect()
def _resolve_hf_checkpoint_dir(self, hf_model_path: str) -> str:
if not hf_model_path:
return hf_model_path
# Local directory or file path.
if os.path.isdir(hf_model_path):
return hf_model_path
if os.path.isfile(hf_model_path):
return os.path.dirname(hf_model_path)
# HuggingFace Hub repo id: snapshot to local cache so we can glob/index files.
try:
from huggingface_hub import snapshot_download
except ImportError as e:
raise ValueError(
f"hf_model_path='{hf_model_path}' does not exist locally and huggingface_hub is not available "
f"to download it. Please provide a local model directory or install huggingface_hub. Error: {e}"
) from e
revision = os.getenv("HF_REVISION")
offline = os.getenv("HF_HUB_OFFLINE") == "1" or os.getenv("TRANSFORMERS_OFFLINE") == "1"
# In distributed runs, let rank0 download first to avoid N-way concurrent downloads.
if torch.distributed.is_available() and torch.distributed.is_initialized():
if self.rank == 0:
local_dir = snapshot_download(
repo_id=hf_model_path,
revision=revision,
local_files_only=offline,
allow_patterns=[
"*.safetensors",
"*.bin",
"*.index.json",
"model.safetensors",
"model.safetensors.index.json",
"pytorch_model.bin",
"pytorch_model.bin.index.json",
"config.json",
],
)
logger.info(f"Resolved HF repo id '{hf_model_path}' to local dir: {local_dir}")
torch.distributed.barrier()
if self.rank != 0:
local_dir = snapshot_download(
repo_id=hf_model_path,
revision=revision,
local_files_only=True,
allow_patterns=[
"*.safetensors",
"*.bin",
"*.index.json",
"model.safetensors",
"model.safetensors.index.json",
"pytorch_model.bin",
"pytorch_model.bin.index.json",
"config.json",
],
)
return local_dir
local_dir = snapshot_download(
repo_id=hf_model_path,
revision=revision,
local_files_only=offline,
allow_patterns=[
"*.safetensors",
"*.bin",
"*.index.json",
"model.safetensors",
"model.safetensors.index.json",
"pytorch_model.bin",
"pytorch_model.bin.index.json",
"config.json",
],
)
if self.rank == 0:
logger.info(f"Resolved HF repo id '{hf_model_path}' to local dir: {local_dir}")
return local_dir
def _copy_weights(self, param, loaded_tensor):
from torch.distributed._tensor import DTensor, Shard
if loaded_tensor.dtype != param.dtype:
loaded_tensor = loaded_tensor.to(param.dtype)
if isinstance(param, DTensor):
shard_placement = None
mesh_dim = -1
for i, placement in enumerate(param.placements):
if isinstance(placement, Shard):
shard_placement = placement
mesh_dim = i
break
local_tensor = param.to_local()
if shard_placement is None:
local_tensor.copy_(loaded_tensor)
else:
dim = shard_placement.dim
mesh = param.device_mesh
my_coordinate = mesh.get_coordinate()
if my_coordinate is None:
return
rank_in_dim = my_coordinate[mesh_dim]
world_size_in_dim = mesh.size(mesh_dim)
full_size = param.shape[dim]
chunk_size = (full_size + world_size_in_dim - 1) // world_size_in_dim
start = rank_in_dim * chunk_size
end = min(start + chunk_size, full_size)
if start >= full_size:
return
sliced_tensor = loaded_tensor.narrow(dim, start, end - start)
slices = [slice(None)] * local_tensor.ndim
slices[dim] = slice(0, sliced_tensor.shape[dim])
local_tensor[tuple(slices)].copy_(sliced_tensor)
else:
param.data.copy_(loaded_tensor) | --- +++ @@ -214,6 +214,7 @@ return model
def _save_non_persistent_buffers(self, model: HFModel) -> dict:
+ """Save non-persistent buffers, such as inv_freq."""
saved = {}
for mod_name, module in model.named_modules():
for buf_name in module._non_persistent_buffers_set:
@@ -226,6 +227,7 @@ return saved
def _restore_non_persistent_buffers(self, model: HFModel, saved_buffers: dict):
+ """Register saved non-persistent buffers to model."""
if not saved_buffers:
return
device = get_current_accelerator()
@@ -344,6 +346,12 @@ gc.collect()
def _resolve_hf_checkpoint_dir(self, hf_model_path: str) -> str:
+ """Resolve a HF model identifier or local path to a local directory containing checkpoint files.
+
+ - If `hf_model_path` is an existing directory, return it.
+ - If it's a file path, return its parent directory.
+ - Otherwise treat it as a Hugging Face Hub repo id and download/resolve to the local cache dir.
+ """
if not hf_model_path:
return hf_model_path
@@ -467,4 +475,4 @@ slices[dim] = slice(0, sliced_tensor.shape[dim])
local_tensor[tuple(slices)].copy_(sliced_tensor)
else:
- param.data.copy_(loaded_tensor)+ param.data.copy_(loaded_tensor)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/trainer_plugins/distributed/fsdp2.py |
Add docstrings that explain inputs and outputs | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ....accelerator.helper import get_current_accelerator
from .base import BaseKernel
__all__ = ["Registry", "register_kernel"]
class Registry:
_kernels: dict[str, type[BaseKernel]] = {}
@classmethod
def register(cls, kernel_cls: type[BaseKernel]) -> type[BaseKernel] | None:
if not issubclass(kernel_cls, BaseKernel):
raise TypeError(f"Class {kernel_cls} must inherit from BaseKernel")
kernel_id = kernel_cls.get_kernel_id()
device = kernel_cls.get_device()
# The device type of the current accelerator does not match the device type required by the kernel, skip registration
if device != get_current_accelerator().type:
return
if not kernel_id:
raise ValueError(f"Kernel ID (_kernel_id) is needed for {kernel_cls} to register")
if kernel_id in cls._kernels:
raise ValueError(f"{kernel_id} already registered! The registered kernel is {cls._kernels[kernel_id]}")
cls._kernels[kernel_id] = kernel_cls
return kernel_cls
@classmethod
def get(cls, kernel_id: str) -> type[BaseKernel] | None:
return cls._kernels.get(kernel_id)
@classmethod
def get_registered_kernels(cls) -> dict[str, type[BaseKernel]]:
return cls._kernels
# export decorator alias
register_kernel = Registry.register | --- +++ @@ -12,6 +12,13 @@ # See the License for the specific language governing permissions and
# limitations under the License.
+"""The definition of kernel registry.
+
+Init Phase:
+1. Define kernel registry.
+2. Register kernels.
+
+"""
from ....accelerator.helper import get_current_accelerator
from .base import BaseKernel
@@ -21,11 +28,29 @@
class Registry:
+ """Registry for managing kernel implementations.
+
+ Storage structure: ``{ "kernel_id": Class }``
+ """
_kernels: dict[str, type[BaseKernel]] = {}
@classmethod
def register(cls, kernel_cls: type[BaseKernel]) -> type[BaseKernel] | None:
+ """Decorator to register a kernel class.
+
+ The class must inherit from :class:`BaseKernel` and specify ``_kernel_id`` and ``_device`` attributes.
+
+ Args:
+ kernel_cls (type[BaseKernel]): The kernel class to register.
+
+ Returns:
+ type[BaseKernel] | None: The registered kernel class if the device type matches the current accelerator
+
+ Raises:
+ TypeError: If the class does not inherit from :class:`BaseKernel`.
+ ValueError: If the kernel ID is missing or already registered.
+ """
if not issubclass(kernel_cls, BaseKernel):
raise TypeError(f"Class {kernel_cls} must inherit from BaseKernel")
@@ -47,12 +72,25 @@
@classmethod
def get(cls, kernel_id: str) -> type[BaseKernel] | None:
+ """Retrieves a registered kernel implementation by its ID.
+
+ Args:
+ kernel_id (str): The ID of the kernel to retrieve.
+
+ Returns:
+ type[BaseKernel] | None: The kernel class if found, else ``None``.
+ """
return cls._kernels.get(kernel_id)
@classmethod
def get_registered_kernels(cls) -> dict[str, type[BaseKernel]]:
+ """Returns a dictionary of all registered kernels.
+
+ Returns:
+ dict[str, type[BaseKernel]]: Dictionary mapping kernel IDs to kernel classes.
+ """
return cls._kernels
# export decorator alias
-register_kernel = Registry.register+register_kernel = Registry.register
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/plugins/model_plugins/kernels/registry.py |
Write docstrings including parameters and return values | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
from collections.abc import Generator
from threading import Thread
from ..config import InputArgument, ModelArguments, SampleArguments, SampleBackend, get_args
from ..core.base_sampler import BaseSampler
from ..core.data_engine import DataEngine
from ..core.model_engine import ModelEngine
from ..core.utils.rendering import Renderer
from ..utils.types import HFModel, Message, Sample, TorchDataset
class SyncSampler(BaseSampler):
def __init__(
self,
args: SampleArguments,
model_args: ModelArguments,
model: HFModel,
renderer: Renderer,
) -> None:
def _start_background_loop(loop: asyncio.AbstractEventLoop) -> None:
asyncio.set_event_loop(loop)
loop.run_forever()
super().__init__(args, model_args, model, renderer)
self._loop = asyncio.new_event_loop()
self._thread = Thread(target=_start_background_loop, args=(self._loop,), daemon=True)
self._thread.start()
def generate(self, messages: list[Message], tools: str | None = None) -> Generator[str, None, None]:
generator = super().generate(messages, tools)
while True:
try:
token = asyncio.run_coroutine_threadsafe(generator.__anext__(), self._loop).result()
yield token
except StopAsyncIteration:
break
def batch_infer(self, dataset: TorchDataset) -> list[Sample]:
return asyncio.run_coroutine_threadsafe(super().batch_infer(dataset), self._loop).result()
def run_chat(args: InputArgument = None):
model_args, data_args, _, sample_args = get_args(args)
if sample_args.sample_backend != SampleBackend.HF:
model_args.init_plugin = {"name": "init_on_meta"}
model_engine = ModelEngine(model_args)
sampler = SyncSampler(sample_args, model_args, model_engine.model, model_engine.renderer)
if data_args.train_dataset is not None:
dataset = DataEngine(data_args.train_dataset)
sampler.batch_infer(dataset)
else:
if os.name != "nt":
try:
import readline # noqa: F401
except ImportError:
print("Install `readline` for a better experience.")
messages = []
print("Welcome to the CLI application, use `clear` to remove the history, use `exit` to exit the application.")
while True:
try:
query = input("\nUser: ")
except UnicodeDecodeError:
print("Detected decoding error at the inputs, please set the terminal encoding to utf-8.")
continue
except Exception:
raise
if query.strip() == "exit":
break
if query.strip() == "clear":
messages = []
print("History has been removed.")
continue
messages.append({"role": "user", "content": [{"type": "text", "value": query}]})
print("Assistant: ", end="", flush=True)
response = ""
for new_text in sampler.generate(messages):
print(new_text, end="", flush=True)
response += new_text
print()
messages.append(model_engine.renderer.parse_message(response))
if __name__ == "__main__":
run_chat() | --- +++ @@ -43,6 +43,15 @@ self._thread.start()
def generate(self, messages: list[Message], tools: str | None = None) -> Generator[str, None, None]:
+ """Generate tokens synchronously.
+
+ Args:
+ messages: List of messages.
+ tools: Tools string.
+
+ Yields:
+ Generated tokens.
+ """
generator = super().generate(messages, tools)
while True:
try:
@@ -52,6 +61,14 @@ break
def batch_infer(self, dataset: TorchDataset) -> list[Sample]:
+ """Batch infer samples synchronously.
+
+ Args:
+ dataset: Torch dataset.
+
+ Returns:
+ List of samples.
+ """
return asyncio.run_coroutine_threadsafe(super().batch_infer(dataset), self._loop).result()
@@ -105,4 +122,4 @@
if __name__ == "__main__":
- run_chat()+ run_chat()
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/samplers/cli_sampler.py |
Write proper docstrings for these functions | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from transformers import PreTrainedTokenizer
from transformers import set_seed as hf_set_seed
from ..accelerator.interface import DistributedInterface
from .constants import IGNORE_INDEX
from .types import BatchInput, ModelInput, Processor, Tensor
def set_seed(seed: int) -> None:
hf_set_seed(seed)
def is_tokenizer(processor: Processor) -> bool:
return not hasattr(processor, "tokenizer")
def get_tokenizer(processor: Processor) -> PreTrainedTokenizer:
return processor.tokenizer if hasattr(processor, "tokenizer") else processor
def _pad_and_truncate(tensor: Tensor, max_seqlen: int, pad_value: int = 0) -> Tensor:
if tensor.shape[-1] >= max_seqlen:
return tensor[..., :max_seqlen]
pad_shape = list(tensor.shape)
pad_shape[-1] = max_seqlen - tensor.shape[-1]
pad_tensor = torch.full(pad_shape, pad_value, dtype=tensor.dtype, device=tensor.device)
return torch.cat([tensor, pad_tensor], dim=-1)
def pad_and_truncate(samples: list[ModelInput], max_seqlen: int) -> list[BatchInput]:
max_length = min(max(len(sample["input_ids"]) for sample in samples), max_seqlen)
padded_samples = []
for sample in samples:
padded_sample = {}
for key, value in sample.items():
if "label" in key:
pad_value = IGNORE_INDEX
else:
pad_value = 0
if not isinstance(value, str):
padded_sample[key] = _pad_and_truncate(torch.tensor(value), max_length, pad_value)
else:
padded_sample[key] = value
padded_samples.append(padded_sample)
return padded_samples
def compute_valid_tokens(batches: list[BatchInput]) -> int:
device = DistributedInterface().current_device
return sum(
(batch["labels"].to(device, non_blocking=True) != IGNORE_INDEX).sum().item()
for batch in batches
if "labels" in batch
) | --- +++ @@ -23,14 +23,35 @@
def set_seed(seed: int) -> None:
+ """Set seed for reproducibility.
+
+ Args:
+ seed: Random seed.
+ """
hf_set_seed(seed)
def is_tokenizer(processor: Processor) -> bool:
+ """Check if processor is tokenizer.
+
+ Args:
+ processor: Processor.
+
+ Returns:
+ Whether processor is tokenizer.
+ """
return not hasattr(processor, "tokenizer")
def get_tokenizer(processor: Processor) -> PreTrainedTokenizer:
+ """Get tokenizer from processor.
+
+ Args:
+ processor: Processor.
+
+ Returns:
+ Tokenizer.
+ """
return processor.tokenizer if hasattr(processor, "tokenizer") else processor
@@ -66,9 +87,17 @@
def compute_valid_tokens(batches: list[BatchInput]) -> int:
+ """Compute valid tokens in batches.
+
+ Args:
+ batches: Batches.
+
+ Returns:
+ Number of valid tokens.
+ """
device = DistributedInterface().current_device
return sum(
(batch["labels"].to(device, non_blocking=True) != IGNORE_INDEX).sum().item()
for batch in batches
if "labels" in batch
- )+ )
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/utils/helper.py |
Add docstrings including usage examples | # Copyright 2025 Optuna, HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/utils/logging.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import threading
from functools import lru_cache
from typing import Optional
_thread_lock = threading.RLock()
_default_handler: Optional["logging.Handler"] = None
_default_log_level: "logging._Level" = logging.INFO
class _Logger(logging.Logger):
def info_rank0(self, *args, **kwargs) -> None:
self.info(*args, **kwargs)
def warning_rank0(self, *args, **kwargs) -> None:
self.warning(*args, **kwargs)
def warning_rank0_once(self, *args, **kwargs) -> None:
self.warning(*args, **kwargs)
def _get_default_logging_level() -> "logging._Level":
env_level_str = os.getenv("LLAMAFACTORY_VERBOSITY", None)
if env_level_str:
if env_level_str.upper() in logging._nameToLevel:
return logging._nameToLevel[env_level_str.upper()]
else:
raise ValueError(f"Unknown logging level: {env_level_str}.")
return _default_log_level
def _get_library_name() -> str:
return ".".join(__name__.split(".")[:2]) # llamafactory.v1
def _get_library_root_logger() -> "_Logger":
return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
global _default_handler
with _thread_lock:
if _default_handler: # already configured
return
formatter = logging.Formatter(
fmt="[%(levelname)s|%(asctime)s] %(name)s:%(lineno)s >> %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
_default_handler = logging.StreamHandler(sys.stdout)
_default_handler.setFormatter(formatter)
library_root_logger = _get_library_root_logger()
library_root_logger.addHandler(_default_handler)
library_root_logger.setLevel(_get_default_logging_level())
library_root_logger.propagate = False
def get_logger(name: str | None = None) -> "_Logger":
if name is None:
name = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(name)
def add_handler(handler: "logging.Handler") -> None:
_configure_library_root_logger()
_get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
_configure_library_root_logger()
_get_library_root_logger().removeHandler(handler)
def info_rank0(self: "logging.Logger", *args, **kwargs) -> None:
if int(os.getenv("LOCAL_RANK", "0")) == 0:
self.info(*args, **kwargs)
def warning_rank0(self: "logging.Logger", *args, **kwargs) -> None:
if int(os.getenv("LOCAL_RANK", "0")) == 0:
self.warning(*args, **kwargs)
@lru_cache(None)
def warning_rank0_once(self: "logging.Logger", *args, **kwargs) -> None:
if int(os.getenv("LOCAL_RANK", "0")) == 0:
self.warning(*args, **kwargs)
logging.Logger.info_rank0 = info_rank0
logging.Logger.warning_rank0 = warning_rank0
logging.Logger.warning_rank0_once = warning_rank0_once | --- +++ @@ -29,6 +29,7 @@
class _Logger(logging.Logger):
+ """A logger that supports rank0 logging."""
def info_rank0(self, *args, **kwargs) -> None:
self.info(*args, **kwargs)
@@ -41,6 +42,7 @@
def _get_default_logging_level() -> "logging._Level":
+ """Return the default logging level."""
env_level_str = os.getenv("LLAMAFACTORY_VERBOSITY", None)
if env_level_str:
if env_level_str.upper() in logging._nameToLevel:
@@ -60,6 +62,7 @@
def _configure_library_root_logger() -> None:
+ """Configure root logger using a stdout stream handler with an explicit format."""
global _default_handler
with _thread_lock:
@@ -79,6 +82,7 @@
def get_logger(name: str | None = None) -> "_Logger":
+ """Return a logger with the specified name. It it not supposed to be accessed externally."""
if name is None:
name = _get_library_name()
@@ -87,11 +91,13 @@
def add_handler(handler: "logging.Handler") -> None:
+ """Add a handler to the root logger."""
_configure_library_root_logger()
_get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
+ """Remove a handler to the root logger."""
_configure_library_root_logger()
_get_library_root_logger().removeHandler(handler)
@@ -114,4 +120,4 @@
logging.Logger.info_rank0 = info_rank0
logging.Logger.warning_rank0 = warning_rank0
-logging.Logger.warning_rank0_once = warning_rank0_once+logging.Logger.warning_rank0_once = warning_rank0_once
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/utils/logging.py |
Add docstrings to my Python code | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import Any
from transformers.trainer_utils import get_last_checkpoint
from ..extras.constants import (
CHECKPOINT_NAMES,
PEFT_METHODS,
RUNNING_LOG,
STAGES_USE_PAIR_DATA,
SWANLAB_CONFIG,
TRAINER_LOG,
TRAINING_STAGES,
)
from ..extras.packages import is_gradio_available, is_matplotlib_available
from ..extras.ploting import gen_loss_plot
from ..model import QuantizationMethod
from .common import DEFAULT_CONFIG_DIR, DEFAULT_DATA_DIR, get_model_path, get_save_dir, get_template, load_dataset_info
from .locales import ALERTS
if is_gradio_available():
import gradio as gr
def switch_hub(hub_name: str) -> None:
os.environ["USE_MODELSCOPE_HUB"] = "1" if hub_name == "modelscope" else "0"
os.environ["USE_OPENMIND_HUB"] = "1" if hub_name == "openmind" else "0"
def can_quantize(finetuning_type: str) -> "gr.Dropdown":
if finetuning_type not in PEFT_METHODS:
return gr.Dropdown(value="none", interactive=False)
else:
return gr.Dropdown(interactive=True)
def can_quantize_to(quantization_method: str) -> "gr.Dropdown":
if quantization_method == QuantizationMethod.BNB:
available_bits = ["none", "8", "4"]
elif quantization_method == QuantizationMethod.HQQ:
available_bits = ["none", "8", "6", "5", "4", "3", "2", "1"]
elif quantization_method == QuantizationMethod.EETQ:
available_bits = ["none", "8"]
return gr.Dropdown(choices=available_bits)
def change_stage(training_stage: str = list(TRAINING_STAGES.keys())[0]) -> tuple[list[str], bool]:
return [], TRAINING_STAGES[training_stage] == "pt"
def get_model_info(model_name: str) -> tuple[str, str]:
return get_model_path(model_name), get_template(model_name)
def check_template(lang: str, template: str) -> None:
if template == "default":
gr.Warning(ALERTS["warn_no_instruct"][lang])
def get_trainer_info(lang: str, output_path: os.PathLike, do_train: bool) -> tuple[str, "gr.Slider", dict[str, Any]]:
running_log = ""
running_progress = gr.Slider(visible=False)
running_info = {}
running_log_path = os.path.join(output_path, RUNNING_LOG)
if os.path.isfile(running_log_path):
with open(running_log_path, encoding="utf-8") as f:
running_log = "```\n" + f.read()[-20000:] + "\n```\n" # avoid lengthy log
trainer_log_path = os.path.join(output_path, TRAINER_LOG)
if os.path.isfile(trainer_log_path):
trainer_log: list[dict[str, Any]] = []
with open(trainer_log_path, encoding="utf-8") as f:
for line in f:
trainer_log.append(json.loads(line))
if len(trainer_log) != 0:
latest_log = trainer_log[-1]
percentage = latest_log["percentage"]
label = "Running {:d}/{:d}: {} < {}".format(
latest_log["current_steps"],
latest_log["total_steps"],
latest_log["elapsed_time"],
latest_log["remaining_time"],
)
running_progress = gr.Slider(label=label, value=percentage, visible=True)
if do_train and is_matplotlib_available():
running_info["loss_viewer"] = gr.Plot(gen_loss_plot(trainer_log))
swanlab_config_path = os.path.join(output_path, SWANLAB_CONFIG)
if os.path.isfile(swanlab_config_path):
with open(swanlab_config_path, encoding="utf-8") as f:
swanlab_public_config = json.load(f)
swanlab_link = swanlab_public_config["cloud"]["experiment_url"]
if swanlab_link is not None:
running_info["swanlab_link"] = gr.Markdown(
ALERTS["info_swanlab_link"][lang] + swanlab_link, visible=True
)
return running_log, running_progress, running_info
def list_checkpoints(model_name: str, finetuning_type: str) -> "gr.Dropdown":
checkpoints = []
if model_name:
save_dir = get_save_dir(model_name, finetuning_type)
if save_dir and os.path.isdir(save_dir):
for checkpoint in os.listdir(save_dir):
if os.path.isdir(os.path.join(save_dir, checkpoint)) and any(
os.path.isfile(os.path.join(save_dir, checkpoint, name)) for name in CHECKPOINT_NAMES
):
checkpoints.append(checkpoint)
if finetuning_type in PEFT_METHODS:
return gr.Dropdown(value=[], choices=checkpoints, multiselect=True)
else:
return gr.Dropdown(value=None, choices=checkpoints, multiselect=False)
def list_config_paths(current_time: str) -> "gr.Dropdown":
config_files = [f"{current_time}.yaml"]
if os.path.isdir(DEFAULT_CONFIG_DIR):
for file_name in os.listdir(DEFAULT_CONFIG_DIR):
if file_name.endswith(".yaml") and file_name not in config_files:
config_files.append(file_name)
return gr.Dropdown(choices=config_files)
def list_datasets(dataset_dir: str = None, training_stage: str = list(TRAINING_STAGES.keys())[0]) -> "gr.Dropdown":
dataset_info = load_dataset_info(dataset_dir if dataset_dir is not None else DEFAULT_DATA_DIR)
ranking = TRAINING_STAGES[training_stage] in STAGES_USE_PAIR_DATA
datasets = [k for k, v in dataset_info.items() if v.get("ranking", False) == ranking]
return gr.Dropdown(choices=datasets)
def list_output_dirs(model_name: str | None, finetuning_type: str, current_time: str) -> "gr.Dropdown":
output_dirs = [f"train_{current_time}"]
if model_name:
save_dir = get_save_dir(model_name, finetuning_type)
if save_dir and os.path.isdir(save_dir):
for folder in os.listdir(save_dir):
output_dir = os.path.join(save_dir, folder)
if os.path.isdir(output_dir) and get_last_checkpoint(output_dir) is not None:
output_dirs.append(folder)
return gr.Dropdown(choices=output_dirs) | --- +++ @@ -39,11 +39,20 @@
def switch_hub(hub_name: str) -> None:
+ r"""Switch model hub.
+
+ Inputs: top.hub_name
+ """
os.environ["USE_MODELSCOPE_HUB"] = "1" if hub_name == "modelscope" else "0"
os.environ["USE_OPENMIND_HUB"] = "1" if hub_name == "openmind" else "0"
def can_quantize(finetuning_type: str) -> "gr.Dropdown":
+ r"""Judge if the quantization is available in this finetuning type.
+
+ Inputs: top.finetuning_type
+ Outputs: top.quantization_bit
+ """
if finetuning_type not in PEFT_METHODS:
return gr.Dropdown(value="none", interactive=False)
else:
@@ -51,6 +60,11 @@
def can_quantize_to(quantization_method: str) -> "gr.Dropdown":
+ r"""Get the available quantization bits.
+
+ Inputs: top.quantization_method
+ Outputs: top.quantization_bit
+ """
if quantization_method == QuantizationMethod.BNB:
available_bits = ["none", "8", "4"]
elif quantization_method == QuantizationMethod.HQQ:
@@ -62,19 +76,44 @@
def change_stage(training_stage: str = list(TRAINING_STAGES.keys())[0]) -> tuple[list[str], bool]:
+ r"""Modify states after changing the training stage.
+
+ Inputs: train.training_stage
+ Outputs: train.dataset, train.packing
+ """
return [], TRAINING_STAGES[training_stage] == "pt"
def get_model_info(model_name: str) -> tuple[str, str]:
+ r"""Get the necessary information of this model.
+
+ Inputs: top.model_name
+ Outputs: top.model_path, top.template
+ """
return get_model_path(model_name), get_template(model_name)
def check_template(lang: str, template: str) -> None:
+ r"""Check if an instruct model is used.
+
+ Please use queue=True to show the warning message.
+
+ Inputs: top.lang, top.template
+ """
if template == "default":
gr.Warning(ALERTS["warn_no_instruct"][lang])
def get_trainer_info(lang: str, output_path: os.PathLike, do_train: bool) -> tuple[str, "gr.Slider", dict[str, Any]]:
+ r"""Get training infomation for monitor.
+
+ If do_train is True:
+ Inputs: top.lang, train.output_path
+ Outputs: train.output_box, train.progress_bar, train.loss_viewer, train.swanlab_link
+ If do_train is False:
+ Inputs: top.lang, eval.output_path
+ Outputs: eval.output_box, eval.progress_bar, None, None
+ """
running_log = ""
running_progress = gr.Slider(visible=False)
running_info = {}
@@ -119,6 +158,11 @@
def list_checkpoints(model_name: str, finetuning_type: str) -> "gr.Dropdown":
+ r"""List all available checkpoints.
+
+ Inputs: top.model_name, top.finetuning_type
+ Outputs: top.checkpoint_path
+ """
checkpoints = []
if model_name:
save_dir = get_save_dir(model_name, finetuning_type)
@@ -136,6 +180,11 @@
def list_config_paths(current_time: str) -> "gr.Dropdown":
+ r"""List all the saved configuration files.
+
+ Inputs: train.current_time
+ Outputs: train.config_path
+ """
config_files = [f"{current_time}.yaml"]
if os.path.isdir(DEFAULT_CONFIG_DIR):
for file_name in os.listdir(DEFAULT_CONFIG_DIR):
@@ -146,6 +195,11 @@
def list_datasets(dataset_dir: str = None, training_stage: str = list(TRAINING_STAGES.keys())[0]) -> "gr.Dropdown":
+ r"""List all available datasets in the dataset dir for the training stage.
+
+ Inputs: *.dataset_dir, *.training_stage
+ Outputs: *.dataset
+ """
dataset_info = load_dataset_info(dataset_dir if dataset_dir is not None else DEFAULT_DATA_DIR)
ranking = TRAINING_STAGES[training_stage] in STAGES_USE_PAIR_DATA
datasets = [k for k, v in dataset_info.items() if v.get("ranking", False) == ranking]
@@ -153,6 +207,11 @@
def list_output_dirs(model_name: str | None, finetuning_type: str, current_time: str) -> "gr.Dropdown":
+ r"""List all the directories that can resume from.
+
+ Inputs: top.model_name, top.finetuning_type, train.current_time
+ Outputs: train.output_dir
+ """
output_dirs = [f"train_{current_time}"]
if model_name:
save_dir = get_save_dir(model_name, finetuning_type)
@@ -162,4 +221,4 @@ if os.path.isdir(output_dir) and get_last_checkpoint(output_dir) is not None:
output_dirs.append(folder)
- return gr.Dropdown(choices=output_dirs)+ return gr.Dropdown(choices=output_dirs)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/webui/control.py |
Document my Python code with docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
def find_available_port() -> int:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
def is_env_enabled(env_var: str, default: str = "0") -> bool:
return os.getenv(env_var, default).lower() in ["true", "yes", "on", "t", "y", "1"]
def use_ray() -> bool:
return False
def use_kt() -> bool:
return False | --- +++ @@ -17,6 +17,7 @@
def find_available_port() -> int:
+ """Find an available port on the local machine."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
port = sock.getsockname()[1]
@@ -25,6 +26,7 @@
def is_env_enabled(env_var: str, default: str = "0") -> bool:
+ """Check if the environment variable is enabled."""
return os.getenv(env_var, default).lower() in ["true", "yes", "on", "t", "y", "1"]
@@ -33,4 +35,4 @@
def use_kt() -> bool:
- return False+ return False
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/utils/env.py |
Insert docstrings into my code | # Copyright 2025 Optuna, HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v5.0.0rc0/src/transformers/utils/logging.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .types import ModelInput
class StatefulBuffer:
def __init__(self, max_buffer_size: int = 1_000_000_000) -> None:
self._buffer: list[ModelInput] = []
self._buffer_size: int = 0
self._max_buffer_size: int = max_buffer_size
def __len__(self) -> int:
return len(self._buffer)
@property
def size(self) -> int:
return self._buffer_size
def put(self, samples: list[ModelInput]) -> None:
num_tokens = sum(len(sample["input_ids"]) for sample in samples)
if self._buffer_size + num_tokens > self._max_buffer_size:
raise ValueError(f"Buffer size exceeds max buffer size {self._max_buffer_size}.")
self._buffer.extend(samples)
self._buffer_size += num_tokens
def get(self, value: int) -> list[ModelInput]:
samples = self._buffer[:value]
self._buffer_size -= sum(len(sample["input_ids"]) for sample in samples)
del self._buffer[:value]
return samples
def clear(self) -> None:
self._buffer = []
self._buffer_size = 0
def state_dict(self) -> dict:
return {
"buffer": self._buffer,
"buffer_size": self._buffer_size,
}
def load_state_dict(self, state_dict: dict) -> None:
self._buffer = state_dict["buffer"]
self._buffer_size = state_dict["buffer_size"] | --- +++ @@ -19,6 +19,7 @@
class StatefulBuffer:
+ """A buffer that stores model inputs."""
def __init__(self, max_buffer_size: int = 1_000_000_000) -> None:
self._buffer: list[ModelInput] = []
@@ -33,6 +34,7 @@ return self._buffer_size
def put(self, samples: list[ModelInput]) -> None:
+ """Add samples to the buffer."""
num_tokens = sum(len(sample["input_ids"]) for sample in samples)
if self._buffer_size + num_tokens > self._max_buffer_size:
raise ValueError(f"Buffer size exceeds max buffer size {self._max_buffer_size}.")
@@ -41,21 +43,25 @@ self._buffer_size += num_tokens
def get(self, value: int) -> list[ModelInput]:
+ """Get samples from the buffer and remove them."""
samples = self._buffer[:value]
self._buffer_size -= sum(len(sample["input_ids"]) for sample in samples)
del self._buffer[:value]
return samples
def clear(self) -> None:
+ """Clear the buffer."""
self._buffer = []
self._buffer_size = 0
def state_dict(self) -> dict:
+ """Returns the state of the buffer."""
return {
"buffer": self._buffer,
"buffer_size": self._buffer_size,
}
def load_state_dict(self, state_dict: dict) -> None:
+ """Loads the state into the buffer."""
self._buffer = state_dict["buffer"]
- self._buffer_size = state_dict["buffer_size"]+ self._buffer_size = state_dict["buffer_size"]
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/v1/utils/objects.py |
Add inline docstrings for readability | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections.abc import Generator
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any
from transformers.utils import is_torch_npu_available
from ..chat import ChatModel
from ..data import Role
from ..extras.constants import PEFT_METHODS
from ..extras.misc import torch_gc
from ..extras.packages import is_gradio_available
from .common import get_save_dir, load_config
from .locales import ALERTS
if TYPE_CHECKING:
from ..chat import BaseEngine
from .manager import Manager
if is_gradio_available():
import gradio as gr
def _escape_html(text: str) -> str:
return text.replace("<", "<").replace(">", ">")
def _format_response(text: str, lang: str, escape_html: bool, thought_words: tuple[str, str]) -> str:
if thought_words[0] not in text:
return _escape_html(text) if escape_html else text
text = text.replace(thought_words[0], "")
result = text.split(thought_words[1], maxsplit=1)
if len(result) == 1:
summary = ALERTS["info_thinking"][lang]
thought, answer = text, ""
else:
summary = ALERTS["info_thought"][lang]
thought, answer = result
if escape_html:
thought, answer = _escape_html(thought), _escape_html(answer)
return (
f"<details open><summary class='thinking-summary'><span>{summary}</span></summary>\n\n"
f"<div class='thinking-container'>\n{thought}\n</div>\n</details>{answer}"
)
@contextmanager
def update_attr(obj: Any, name: str, value: Any):
old_value = getattr(obj, name, None)
setattr(obj, name, value)
yield
setattr(obj, name, old_value)
class WebChatModel(ChatModel):
def __init__(self, manager: "Manager", demo_mode: bool = False, lazy_init: bool = True) -> None:
self.manager = manager
self.demo_mode = demo_mode
self.engine: BaseEngine | None = None
if not lazy_init: # read arguments from command line
super().__init__()
if demo_mode and os.getenv("DEMO_MODEL") and os.getenv("DEMO_TEMPLATE"): # load demo model
model_name_or_path = os.getenv("DEMO_MODEL")
template = os.getenv("DEMO_TEMPLATE")
infer_backend = os.getenv("DEMO_BACKEND", "huggingface")
super().__init__(
dict(model_name_or_path=model_name_or_path, template=template, infer_backend=infer_backend)
)
@property
def loaded(self) -> bool:
return self.engine is not None
def load_model(self, data) -> Generator[str, None, None]:
get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)]
lang, model_name, model_path = get("top.lang"), get("top.model_name"), get("top.model_path")
finetuning_type, checkpoint_path = get("top.finetuning_type"), get("top.checkpoint_path")
user_config = load_config()
error = ""
if self.loaded:
error = ALERTS["err_exists"][lang]
elif not model_name:
error = ALERTS["err_no_model"][lang]
elif not model_path:
error = ALERTS["err_no_path"][lang]
elif self.demo_mode:
error = ALERTS["err_demo"][lang]
try:
json.loads(get("infer.extra_args"))
except json.JSONDecodeError:
error = ALERTS["err_json_schema"][lang]
if error:
gr.Warning(error)
yield error
return
yield ALERTS["info_loading"][lang]
args = dict(
model_name_or_path=model_path,
cache_dir=user_config.get("cache_dir", None),
finetuning_type=finetuning_type,
template=get("top.template"),
rope_scaling=get("top.rope_scaling") if get("top.rope_scaling") != "none" else None,
flash_attn="fa2" if get("top.booster") == "flashattn2" else "auto",
use_unsloth=(get("top.booster") == "unsloth"),
enable_liger_kernel=(get("top.booster") == "liger_kernel"),
infer_backend=get("infer.infer_backend"),
infer_dtype=get("infer.infer_dtype"),
trust_remote_code=True,
)
args.update(json.loads(get("infer.extra_args")))
# checkpoints
if checkpoint_path:
if finetuning_type in PEFT_METHODS: # list
args["adapter_name_or_path"] = ",".join(
[get_save_dir(model_name, finetuning_type, adapter) for adapter in checkpoint_path]
)
else: # str
args["model_name_or_path"] = get_save_dir(model_name, finetuning_type, checkpoint_path)
# quantization
if get("top.quantization_bit") != "none":
args["quantization_bit"] = int(get("top.quantization_bit"))
args["quantization_method"] = get("top.quantization_method")
args["double_quantization"] = not is_torch_npu_available()
super().__init__(args)
yield ALERTS["info_loaded"][lang]
def unload_model(self, data) -> Generator[str, None, None]:
lang = data[self.manager.get_elem_by_id("top.lang")]
if self.demo_mode:
gr.Warning(ALERTS["err_demo"][lang])
yield ALERTS["err_demo"][lang]
return
yield ALERTS["info_unloading"][lang]
self.engine = None
torch_gc()
yield ALERTS["info_unloaded"][lang]
@staticmethod
def append(
chatbot: list[dict[str, str]],
messages: list[dict[str, str]],
role: str,
query: str,
escape_html: bool,
) -> tuple[list[dict[str, str]], list[dict[str, str]], str]:
return (
chatbot + [{"role": "user", "content": _escape_html(query) if escape_html else query}],
messages + [{"role": role, "content": query}],
"",
)
def stream(
self,
chatbot: list[dict[str, str]],
messages: list[dict[str, str]],
lang: str,
system: str,
tools: str,
image: Any | None,
video: Any | None,
audio: Any | None,
max_new_tokens: int,
top_p: float,
temperature: float,
skip_special_tokens: bool,
escape_html: bool,
enable_thinking: bool,
) -> Generator[tuple[list[dict[str, str]], list[dict[str, str]]], None, None]:
with update_attr(self.engine.template, "enable_thinking", enable_thinking):
chatbot.append({"role": "assistant", "content": ""})
response = ""
for new_text in self.stream_chat(
messages,
system,
tools,
images=[image] if image else None,
videos=[video] if video else None,
audios=[audio] if audio else None,
max_new_tokens=max_new_tokens,
top_p=top_p,
temperature=temperature,
skip_special_tokens=skip_special_tokens,
):
response += new_text
if tools:
result = self.engine.template.extract_tool(response)
else:
result = response
if isinstance(result, list):
tool_calls = [{"name": tool.name, "arguments": json.loads(tool.arguments)} for tool in result]
tool_calls = json.dumps(tool_calls, ensure_ascii=False)
output_messages = messages + [{"role": Role.FUNCTION.value, "content": tool_calls}]
bot_text = "```json\n" + tool_calls + "\n```"
else:
output_messages = messages + [{"role": Role.ASSISTANT.value, "content": result}]
bot_text = _format_response(result, lang, escape_html, self.engine.template.thought_words)
chatbot[-1] = {"role": "assistant", "content": bot_text}
yield chatbot, output_messages | --- +++ @@ -39,10 +39,15 @@
def _escape_html(text: str) -> str:
+ r"""Escape HTML characters."""
return text.replace("<", "<").replace(">", ">")
def _format_response(text: str, lang: str, escape_html: bool, thought_words: tuple[str, str]) -> str:
+ r"""Post-process the response text.
+
+ Based on: https://huggingface.co/spaces/Lyte/DeepSeek-R1-Distill-Qwen-1.5B-Demo-GGUF/blob/main/app.py
+ """
if thought_words[0] not in text:
return _escape_html(text) if escape_html else text
@@ -174,6 +179,11 @@ query: str,
escape_html: bool,
) -> tuple[list[dict[str, str]], list[dict[str, str]], str]:
+ r"""Add the user input to chatbot.
+
+ Inputs: infer.chatbot, infer.messages, infer.role, infer.query, infer.escape_html
+ Output: infer.chatbot, infer.messages, infer.query
+ """
return (
chatbot + [{"role": "user", "content": _escape_html(query) if escape_html else query}],
messages + [{"role": role, "content": query}],
@@ -197,6 +207,11 @@ escape_html: bool,
enable_thinking: bool,
) -> Generator[tuple[list[dict[str, str]], list[dict[str, str]]], None, None]:
+ r"""Generate output text in stream.
+
+ Inputs: infer.chatbot, infer.messages, infer.system, infer.tools, infer.image, infer.video, ...
+ Output: infer.chatbot, infer.messages
+ """
with update_attr(self.engine.template, "enable_thinking", enable_thinking):
chatbot.append({"role": "assistant", "content": ""})
response = ""
@@ -228,4 +243,4 @@ bot_text = _format_response(result, lang, escape_html, self.engine.template.thought_words)
chatbot[-1] = {"role": "assistant", "content": bot_text}
- yield chatbot, output_messages+ yield chatbot, output_messages
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/webui/chatter.py |
Create docstrings for API functions | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import signal
from collections import defaultdict
from datetime import datetime
from typing import Any
from psutil import Process
from yaml import safe_dump, safe_load
from ..extras import logging
from ..extras.constants import (
DATA_CONFIG,
DEFAULT_TEMPLATE,
MULTIMODAL_SUPPORTED_MODELS,
SUPPORTED_MODELS,
TRAINING_ARGS,
DownloadSource,
)
from ..extras.misc import use_modelscope, use_openmind
logger = logging.get_logger(__name__)
DEFAULT_CACHE_DIR = "llamaboard_cache"
DEFAULT_CONFIG_DIR = "llamaboard_config"
DEFAULT_DATA_DIR = "data"
DEFAULT_SAVE_DIR = "saves"
USER_CONFIG = "user_config.yaml"
def abort_process(pid: int) -> None:
try:
children = Process(pid).children()
if children:
for child in children:
abort_process(child.pid)
os.kill(pid, signal.SIGABRT)
except Exception:
pass
def get_save_dir(*paths: str) -> os.PathLike:
if os.path.sep in paths[-1]:
logger.warning_rank0("Found complex path, some features may be not available.")
return paths[-1]
paths = (path.replace(" ", "").strip() for path in paths)
return os.path.join(DEFAULT_SAVE_DIR, *paths)
def _get_config_path() -> os.PathLike:
return os.path.join(DEFAULT_CACHE_DIR, USER_CONFIG)
def load_config() -> dict[str, str | dict[str, Any]]:
try:
with open(_get_config_path(), encoding="utf-8") as f:
return safe_load(f)
except Exception:
return {"lang": None, "hub_name": None, "last_model": None, "path_dict": {}, "cache_dir": None}
def save_config(
lang: str, hub_name: str | None = None, model_name: str | None = None, model_path: str | None = None
) -> None:
os.makedirs(DEFAULT_CACHE_DIR, exist_ok=True)
user_config = load_config()
user_config["lang"] = lang or user_config["lang"]
if hub_name:
user_config["hub_name"] = hub_name
if model_name:
user_config["last_model"] = model_name
if model_name and model_path:
user_config["path_dict"][model_name] = model_path
with open(_get_config_path(), "w", encoding="utf-8") as f:
safe_dump(user_config, f)
def get_model_path(model_name: str) -> str:
user_config = load_config()
path_dict: dict[DownloadSource, str] = SUPPORTED_MODELS.get(model_name, defaultdict(str))
model_path = user_config["path_dict"].get(model_name, "") or path_dict.get(DownloadSource.DEFAULT, "")
if (
use_modelscope()
and path_dict.get(DownloadSource.MODELSCOPE)
and model_path == path_dict.get(DownloadSource.DEFAULT)
): # replace hf path with ms path
model_path = path_dict.get(DownloadSource.MODELSCOPE)
if (
use_openmind()
and path_dict.get(DownloadSource.OPENMIND)
and model_path == path_dict.get(DownloadSource.DEFAULT)
): # replace hf path with om path
model_path = path_dict.get(DownloadSource.OPENMIND)
return model_path
def get_template(model_name: str) -> str:
return DEFAULT_TEMPLATE.get(model_name, "default")
def get_time() -> str:
return datetime.now().strftime(r"%Y-%m-%d-%H-%M-%S")
def is_multimodal(model_name: str) -> bool:
return model_name in MULTIMODAL_SUPPORTED_MODELS
def load_dataset_info(dataset_dir: str) -> dict[str, dict[str, Any]]:
if dataset_dir == "ONLINE" or dataset_dir.startswith("REMOTE:"):
logger.info_rank0(f"dataset_dir is {dataset_dir}, using online dataset.")
return {}
try:
with open(os.path.join(dataset_dir, DATA_CONFIG), encoding="utf-8") as f:
return json.load(f)
except Exception as err:
logger.warning_rank0(f"Cannot open {os.path.join(dataset_dir, DATA_CONFIG)} due to {str(err)}.")
return {}
def load_args(config_path: str) -> dict[str, Any] | None:
try:
with open(config_path, encoding="utf-8") as f:
return safe_load(f)
except Exception:
return None
def save_args(config_path: str, config_dict: dict[str, Any]) -> None:
with open(config_path, "w", encoding="utf-8") as f:
safe_dump(config_dict, f)
def _clean_cmd(args: dict[str, Any]) -> dict[str, Any]:
no_skip_keys = [
"packing",
"enable_thinking",
"use_reentrant_gc",
"double_quantization",
"freeze_vision_tower",
"freeze_multi_modal_projector",
]
return {k: v for k, v in args.items() if (k in no_skip_keys) or (v is not None and v is not False and v != "")}
def gen_cmd(args: dict[str, Any]) -> str:
cmd_lines = ["llamafactory-cli train "]
for k, v in _clean_cmd(args).items():
if isinstance(v, dict):
cmd_lines.append(f" --{k} {json.dumps(v, ensure_ascii=False)} ")
elif isinstance(v, list):
cmd_lines.append(f" --{k} {' '.join(map(str, v))} ")
else:
cmd_lines.append(f" --{k} {str(v)} ")
if os.name == "nt":
cmd_text = "`\n".join(cmd_lines)
else:
cmd_text = "\\\n".join(cmd_lines)
cmd_text = f"```bash\n{cmd_text}\n```"
return cmd_text
def save_cmd(args: dict[str, Any]) -> str:
output_dir = args["output_dir"]
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, TRAINING_ARGS), "w", encoding="utf-8") as f:
safe_dump(_clean_cmd(args), f)
return os.path.join(output_dir, TRAINING_ARGS)
def load_eval_results(path: os.PathLike) -> str:
with open(path, encoding="utf-8") as f:
result = json.dumps(json.load(f), indent=4)
return f"```json\n{result}\n```\n"
def calculate_pixels(pixels: str) -> int:
if "*" in pixels:
return int(pixels.split("*")[0]) * int(pixels.split("*")[1])
else:
return int(pixels)
def create_ds_config() -> None:
os.makedirs(DEFAULT_CACHE_DIR, exist_ok=True)
ds_config = {
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"gradient_accumulation_steps": "auto",
"gradient_clipping": "auto",
"zero_allow_untested_optimizer": True,
"fp16": {
"enabled": "auto",
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1,
},
"bf16": {"enabled": "auto"},
}
offload_config = {
"device": "cpu",
"pin_memory": True,
}
ds_config["zero_optimization"] = {
"stage": 2,
"allgather_partitions": True,
"allgather_bucket_size": 5e8,
"overlap_comm": False,
"reduce_scatter": True,
"reduce_bucket_size": 5e8,
"contiguous_gradients": True,
"round_robin_gradients": True,
}
with open(os.path.join(DEFAULT_CACHE_DIR, "ds_z2_config.json"), "w", encoding="utf-8") as f:
json.dump(ds_config, f, indent=2)
ds_config["zero_optimization"]["offload_optimizer"] = offload_config
with open(os.path.join(DEFAULT_CACHE_DIR, "ds_z2_offload_config.json"), "w", encoding="utf-8") as f:
json.dump(ds_config, f, indent=2)
ds_config["zero_optimization"] = {
"stage": 3,
"overlap_comm": False,
"contiguous_gradients": True,
"sub_group_size": 1e9,
"reduce_bucket_size": "auto",
"stage3_prefetch_bucket_size": "auto",
"stage3_param_persistence_threshold": "auto",
"stage3_max_live_parameters": 1e9,
"stage3_max_reuse_distance": 1e9,
"stage3_gather_16bit_weights_on_model_save": True,
}
with open(os.path.join(DEFAULT_CACHE_DIR, "ds_z3_config.json"), "w", encoding="utf-8") as f:
json.dump(ds_config, f, indent=2)
ds_config["zero_optimization"]["offload_optimizer"] = offload_config
ds_config["zero_optimization"]["offload_param"] = offload_config
with open(os.path.join(DEFAULT_CACHE_DIR, "ds_z3_offload_config.json"), "w", encoding="utf-8") as f:
json.dump(ds_config, f, indent=2) | --- +++ @@ -44,6 +44,7 @@
def abort_process(pid: int) -> None:
+ r"""Abort the processes recursively in a bottom-up way."""
try:
children = Process(pid).children()
if children:
@@ -56,6 +57,7 @@
def get_save_dir(*paths: str) -> os.PathLike:
+ r"""Get the path to saved model checkpoints."""
if os.path.sep in paths[-1]:
logger.warning_rank0("Found complex path, some features may be not available.")
return paths[-1]
@@ -65,10 +67,12 @@
def _get_config_path() -> os.PathLike:
+ r"""Get the path to user config."""
return os.path.join(DEFAULT_CACHE_DIR, USER_CONFIG)
def load_config() -> dict[str, str | dict[str, Any]]:
+ r"""Load user config if exists."""
try:
with open(_get_config_path(), encoding="utf-8") as f:
return safe_load(f)
@@ -79,6 +83,7 @@ def save_config(
lang: str, hub_name: str | None = None, model_name: str | None = None, model_path: str | None = None
) -> None:
+ r"""Save user config."""
os.makedirs(DEFAULT_CACHE_DIR, exist_ok=True)
user_config = load_config()
user_config["lang"] = lang or user_config["lang"]
@@ -96,6 +101,7 @@
def get_model_path(model_name: str) -> str:
+ r"""Get the model path according to the model name."""
user_config = load_config()
path_dict: dict[DownloadSource, str] = SUPPORTED_MODELS.get(model_name, defaultdict(str))
model_path = user_config["path_dict"].get(model_name, "") or path_dict.get(DownloadSource.DEFAULT, "")
@@ -117,18 +123,22 @@
def get_template(model_name: str) -> str:
+ r"""Get the template name if the model is a chat/distill/instruct model."""
return DEFAULT_TEMPLATE.get(model_name, "default")
def get_time() -> str:
+ r"""Get current date and time."""
return datetime.now().strftime(r"%Y-%m-%d-%H-%M-%S")
def is_multimodal(model_name: str) -> bool:
+ r"""Judge if the model is a vision language model."""
return model_name in MULTIMODAL_SUPPORTED_MODELS
def load_dataset_info(dataset_dir: str) -> dict[str, dict[str, Any]]:
+ r"""Load dataset_info.json."""
if dataset_dir == "ONLINE" or dataset_dir.startswith("REMOTE:"):
logger.info_rank0(f"dataset_dir is {dataset_dir}, using online dataset.")
return {}
@@ -142,6 +152,7 @@
def load_args(config_path: str) -> dict[str, Any] | None:
+ r"""Load the training configuration from config path."""
try:
with open(config_path, encoding="utf-8") as f:
return safe_load(f)
@@ -150,11 +161,13 @@
def save_args(config_path: str, config_dict: dict[str, Any]) -> None:
+ r"""Save the training configuration to config path."""
with open(config_path, "w", encoding="utf-8") as f:
safe_dump(config_dict, f)
def _clean_cmd(args: dict[str, Any]) -> dict[str, Any]:
+ r"""Remove args with NoneType or False or empty string value."""
no_skip_keys = [
"packing",
"enable_thinking",
@@ -167,6 +180,7 @@
def gen_cmd(args: dict[str, Any]) -> str:
+ r"""Generate CLI commands for previewing."""
cmd_lines = ["llamafactory-cli train "]
for k, v in _clean_cmd(args).items():
if isinstance(v, dict):
@@ -186,6 +200,7 @@
def save_cmd(args: dict[str, Any]) -> str:
+ r"""Save CLI commands to launch training."""
output_dir = args["output_dir"]
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, TRAINING_ARGS), "w", encoding="utf-8") as f:
@@ -195,6 +210,7 @@
def load_eval_results(path: os.PathLike) -> str:
+ r"""Get scores after evaluation."""
with open(path, encoding="utf-8") as f:
result = json.dumps(json.load(f), indent=4)
@@ -202,6 +218,7 @@
def calculate_pixels(pixels: str) -> int:
+ r"""Calculate the number of pixels from the expression."""
if "*" in pixels:
return int(pixels.split("*")[0]) * int(pixels.split("*")[1])
else:
@@ -209,6 +226,7 @@
def create_ds_config() -> None:
+ r"""Create deepspeed config in the current directory."""
os.makedirs(DEFAULT_CACHE_DIR, exist_ok=True)
ds_config = {
"train_batch_size": "auto",
@@ -265,4 +283,4 @@ ds_config["zero_optimization"]["offload_optimizer"] = offload_config
ds_config["zero_optimization"]["offload_param"] = offload_config
with open(os.path.join(DEFAULT_CACHE_DIR, "ds_z3_offload_config.json"), "w", encoding="utf-8") as f:
- json.dump(ds_config, f, indent=2)+ json.dump(ds_config, f, indent=2)
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/webui/common.py |
Generate documentation strings for clarity | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any
from .chatter import WebChatModel
from .common import create_ds_config, get_time, load_config
from .locales import LOCALES
from .manager import Manager
from .runner import Runner
if TYPE_CHECKING:
from gradio.components import Component
class Engine:
def __init__(self, demo_mode: bool = False, pure_chat: bool = False) -> None:
self.demo_mode = demo_mode
self.pure_chat = pure_chat
self.manager = Manager()
self.runner = Runner(self.manager, demo_mode)
self.chatter = WebChatModel(self.manager, demo_mode, lazy_init=(not pure_chat))
if not demo_mode:
create_ds_config()
def _update_component(self, input_dict: dict[str, dict[str, Any]]) -> dict["Component", "Component"]:
output_dict: dict[Component, Component] = {}
for elem_id, elem_attr in input_dict.items():
elem = self.manager.get_elem_by_id(elem_id)
output_dict[elem] = elem.__class__(**elem_attr)
return output_dict
def resume(self):
user_config = load_config() if not self.demo_mode else {} # do not use config in demo mode
lang = user_config.get("lang") or "en"
init_dict = {"top.lang": {"value": lang}, "infer.chat_box": {"visible": self.chatter.loaded}}
if not self.pure_chat:
current_time = get_time()
hub_name = user_config.get("hub_name") or "huggingface"
init_dict["top.hub_name"] = {"value": hub_name}
init_dict["train.current_time"] = {"value": current_time}
init_dict["train.output_dir"] = {"value": f"train_{current_time}"}
init_dict["train.config_path"] = {"value": f"{current_time}.yaml"}
init_dict["eval.output_dir"] = {"value": f"eval_{current_time}"}
init_dict["infer.mm_box"] = {"visible": False}
if user_config.get("last_model", None):
init_dict["top.model_name"] = {"value": user_config["last_model"]}
yield self._update_component(init_dict)
if self.runner.running and not self.demo_mode and not self.pure_chat:
yield {elem: elem.__class__(value=value) for elem, value in self.runner.running_data.items()}
if self.runner.do_train:
yield self._update_component({"train.resume_btn": {"value": True}})
else:
yield self._update_component({"eval.resume_btn": {"value": True}})
def change_lang(self, lang: str):
return {
elem: elem.__class__(**LOCALES[elem_name][lang])
for elem_name, elem in self.manager.get_elem_iter()
if elem_name in LOCALES
} | --- +++ @@ -26,6 +26,7 @@
class Engine:
+ r"""A general engine to control the behaviors of Web UI."""
def __init__(self, demo_mode: bool = False, pure_chat: bool = False) -> None:
self.demo_mode = demo_mode
@@ -37,6 +38,7 @@ create_ds_config()
def _update_component(self, input_dict: dict[str, dict[str, Any]]) -> dict["Component", "Component"]:
+ r"""Update gradio components according to the (elem_id, properties) mapping."""
output_dict: dict[Component, Component] = {}
for elem_id, elem_attr in input_dict.items():
elem = self.manager.get_elem_by_id(elem_id)
@@ -45,6 +47,7 @@ return output_dict
def resume(self):
+ r"""Get the initial value of gradio components and restores training status if necessary."""
user_config = load_config() if not self.demo_mode else {} # do not use config in demo mode
lang = user_config.get("lang") or "en"
init_dict = {"top.lang": {"value": lang}, "infer.chat_box": {"visible": self.chatter.loaded}}
@@ -72,8 +75,9 @@ yield self._update_component({"eval.resume_btn": {"value": True}})
def change_lang(self, lang: str):
+ r"""Update the displayed language of gradio components."""
return {
elem: elem.__class__(**LOCALES[elem_name][lang])
for elem_name, elem in self.manager.get_elem_iter()
if elem_name in LOCALES
- }+ }
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/webui/engine.py |
Add return value explanations in docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import TYPE_CHECKING, Any
from ...extras.constants import DATA_CONFIG
from ...extras.packages import is_gradio_available
if is_gradio_available():
import gradio as gr
if TYPE_CHECKING:
from gradio.components import Component
PAGE_SIZE = 2
def prev_page(page_index: int) -> int:
return page_index - 1 if page_index > 0 else page_index
def next_page(page_index: int, total_num: int) -> int:
return page_index + 1 if (page_index + 1) * PAGE_SIZE < total_num else page_index
def can_preview(dataset_dir: str, dataset: list) -> "gr.Button":
try:
with open(os.path.join(dataset_dir, DATA_CONFIG), encoding="utf-8") as f:
dataset_info = json.load(f)
except Exception:
return gr.Button(interactive=False)
if len(dataset) == 0 or "file_name" not in dataset_info[dataset[0]]:
return gr.Button(interactive=False)
data_path = os.path.join(dataset_dir, dataset_info[dataset[0]]["file_name"])
if os.path.isfile(data_path) or (os.path.isdir(data_path) and os.listdir(data_path)):
return gr.Button(interactive=True)
else:
return gr.Button(interactive=False)
def _load_data_file(file_path: str) -> list[Any]:
with open(file_path, encoding="utf-8") as f:
if file_path.endswith(".json"):
return json.load(f)
elif file_path.endswith(".jsonl"):
return [json.loads(line) for line in f]
else:
return list(f)
def get_preview(dataset_dir: str, dataset: list, page_index: int) -> tuple[int, list, "gr.Column"]:
with open(os.path.join(dataset_dir, DATA_CONFIG), encoding="utf-8") as f:
dataset_info = json.load(f)
data_path = os.path.join(dataset_dir, dataset_info[dataset[0]]["file_name"])
if os.path.isfile(data_path):
data = _load_data_file(data_path)
else:
data = []
for file_name in os.listdir(data_path):
data.extend(_load_data_file(os.path.join(data_path, file_name)))
return len(data), data[PAGE_SIZE * page_index : PAGE_SIZE * (page_index + 1)], gr.Column(visible=True)
def create_preview_box(dataset_dir: "gr.Textbox", dataset: "gr.Dropdown") -> dict[str, "Component"]:
data_preview_btn = gr.Button(interactive=False, scale=1)
with gr.Column(visible=False, elem_classes="modal-box") as preview_box:
with gr.Row():
preview_count = gr.Number(value=0, interactive=False, precision=0)
page_index = gr.Number(value=0, interactive=False, precision=0)
with gr.Row():
prev_btn = gr.Button()
next_btn = gr.Button()
close_btn = gr.Button()
with gr.Row():
preview_samples = gr.JSON()
dataset.change(can_preview, [dataset_dir, dataset], [data_preview_btn], queue=False).then(
lambda: 0, outputs=[page_index], queue=False
)
data_preview_btn.click(
get_preview, [dataset_dir, dataset, page_index], [preview_count, preview_samples, preview_box], queue=False
)
prev_btn.click(prev_page, [page_index], [page_index], queue=False).then(
get_preview, [dataset_dir, dataset, page_index], [preview_count, preview_samples, preview_box], queue=False
)
next_btn.click(next_page, [page_index, preview_count], [page_index], queue=False).then(
get_preview, [dataset_dir, dataset, page_index], [preview_count, preview_samples, preview_box], queue=False
)
close_btn.click(lambda: gr.Column(visible=False), outputs=[preview_box], queue=False)
return dict(
data_preview_btn=data_preview_btn,
preview_count=preview_count,
page_index=page_index,
prev_btn=prev_btn,
next_btn=next_btn,
close_btn=close_btn,
preview_samples=preview_samples,
) | --- +++ @@ -40,6 +40,7 @@
def can_preview(dataset_dir: str, dataset: list) -> "gr.Button":
+ r"""Check if the dataset is a local dataset."""
try:
with open(os.path.join(dataset_dir, DATA_CONFIG), encoding="utf-8") as f:
dataset_info = json.load(f)
@@ -67,6 +68,7 @@
def get_preview(dataset_dir: str, dataset: list, page_index: int) -> tuple[int, list, "gr.Column"]:
+ r"""Get the preview samples from the dataset."""
with open(os.path.join(dataset_dir, DATA_CONFIG), encoding="utf-8") as f:
dataset_info = json.load(f)
@@ -117,4 +119,4 @@ next_btn=next_btn,
close_btn=close_btn,
preview_samples=preview_samples,
- )+ )
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/webui/components/data.py |
Add docstrings for production code | from __future__ import annotations
import os
import shutil
import string
from importlib import import_module
from pathlib import Path
from typing import TYPE_CHECKING, Any, cast
from urllib.parse import urlparse
import scrapy
from scrapy.commands import ScrapyCommand
from scrapy.exceptions import UsageError
from scrapy.spiderloader import get_spider_loader
from scrapy.utils.template import render_templatefile, string_camelcase
if TYPE_CHECKING:
import argparse
def sanitize_module_name(module_name: str) -> str:
module_name = module_name.replace("-", "_").replace(".", "_")
if module_name[0] not in string.ascii_letters:
module_name = "a" + module_name
return module_name
def extract_domain(url: str) -> str:
o = urlparse(url)
if o.scheme == "" and o.netloc == "":
o = urlparse("//" + url.lstrip("/"))
return o.netloc
def verify_url_scheme(url: str) -> str:
parsed = urlparse(url)
if parsed.scheme == "" and parsed.netloc == "":
parsed = urlparse("//" + url)._replace(scheme="https")
return parsed.geturl()
class Command(ScrapyCommand):
requires_crawler_process = False
default_settings = {"LOG_ENABLED": False}
def syntax(self) -> str:
return "[options] <name> <domain>"
def short_desc(self) -> str:
return "Generate new spider using pre-defined templates"
def add_options(self, parser: argparse.ArgumentParser) -> None:
super().add_options(parser)
parser.add_argument(
"-l",
"--list",
dest="list",
action="store_true",
help="List available templates",
)
parser.add_argument(
"-e",
"--edit",
dest="edit",
action="store_true",
help="Edit spider after creating it",
)
parser.add_argument(
"-d",
"--dump",
dest="dump",
metavar="TEMPLATE",
help="Dump template to standard output",
)
parser.add_argument(
"-t",
"--template",
dest="template",
default="basic",
help="Uses a custom template.",
)
parser.add_argument(
"--force",
dest="force",
action="store_true",
help="If the spider already exists, overwrite it with the template",
)
def run(self, args: list[str], opts: argparse.Namespace) -> None:
assert self.settings is not None
if opts.list:
self._list_templates()
return
if opts.dump:
template_file = self._find_template(opts.dump)
if template_file:
print(template_file.read_text(encoding="utf-8"))
return
if len(args) != 2:
raise UsageError
name, url = args[0:2]
url = verify_url_scheme(url)
module = sanitize_module_name(name)
if self.settings.get("BOT_NAME") == module:
print("Cannot create a spider with the same name as your project")
return
if not opts.force and self._spider_exists(name):
return
template_file = self._find_template(opts.template)
if template_file:
self._genspider(module, name, url, opts.template, template_file)
if opts.edit:
self.exitcode = os.system(f'scrapy edit "{name}"') # noqa: S605
def _generate_template_variables(
self,
module: str,
name: str,
url: str,
template_name: str,
) -> dict[str, Any]:
assert self.settings is not None
capitalized_module = "".join(s.capitalize() for s in module.split("_"))
return {
"project_name": self.settings.get("BOT_NAME"),
"ProjectName": string_camelcase(self.settings.get("BOT_NAME")),
"module": module,
"name": name,
"url": url,
"domain": extract_domain(url),
"classname": f"{capitalized_module}Spider",
}
def _genspider(
self,
module: str,
name: str,
url: str,
template_name: str,
template_file: str | os.PathLike,
) -> None:
assert self.settings is not None
tvars = self._generate_template_variables(module, name, url, template_name)
if self.settings.get("NEWSPIDER_MODULE"):
spiders_module = import_module(self.settings["NEWSPIDER_MODULE"])
assert spiders_module.__file__
spiders_dir = Path(spiders_module.__file__).parent.resolve()
else:
spiders_module = None
spiders_dir = Path()
spider_file = f"{spiders_dir / module}.py"
shutil.copyfile(template_file, spider_file)
render_templatefile(spider_file, **tvars)
print(
f"Created spider {name!r} using template {template_name!r} ",
end=("" if spiders_module else "\n"),
)
if spiders_module:
print(f"in module:\n {spiders_module.__name__}.{module}")
def _find_template(self, template: str) -> Path | None:
template_file = Path(self.templates_dir, f"{template}.tmpl")
if template_file.exists():
return template_file
print(f"Unable to find template: {template}\n")
print('Use "scrapy genspider --list" to see all available templates.')
return None
def _list_templates(self) -> None:
print("Available templates:")
for file in sorted(Path(self.templates_dir).iterdir()):
if file.suffix == ".tmpl":
print(f" {file.stem}")
def _spider_exists(self, name: str) -> bool:
assert self.settings is not None
if not self.settings.get("NEWSPIDER_MODULE"):
# if run as a standalone command and file with same filename already exists
path = Path(name + ".py")
if path.exists():
print(f"{path.resolve()} already exists")
return True
return False
spider_loader = get_spider_loader(self.settings)
try:
spidercls = spider_loader.load(name)
except KeyError:
pass
else:
# if spider with same name exists
print(f"Spider {name!r} already exists in module:")
print(f" {spidercls.__module__}")
return True
# a file with the same name exists in the target directory
spiders_module = import_module(self.settings["NEWSPIDER_MODULE"])
spiders_dir = Path(cast("str", spiders_module.__file__)).parent
spiders_dir_abs = spiders_dir.resolve()
path = spiders_dir_abs / (name + ".py")
if path.exists():
print(f"{path} already exists")
return True
return False
@property
def templates_dir(self) -> str:
assert self.settings is not None
return str(
Path(
self.settings["TEMPLATES_DIR"] or Path(scrapy.__path__[0], "templates"),
"spiders",
)
) | --- +++ @@ -19,6 +19,10 @@
def sanitize_module_name(module_name: str) -> str:
+ """Sanitize the given module name, by replacing dashes and points
+ with underscores and prefixing it with a letter if it doesn't start
+ with one
+ """
module_name = module_name.replace("-", "_").replace(".", "_")
if module_name[0] not in string.ascii_letters:
module_name = "a" + module_name
@@ -26,6 +30,7 @@
def extract_domain(url: str) -> str:
+ """Extract domain name from URL string"""
o = urlparse(url)
if o.scheme == "" and o.netloc == "":
o = urlparse("//" + url.lstrip("/"))
@@ -33,6 +38,7 @@
def verify_url_scheme(url: str) -> str:
+ """Check url for scheme and insert https if none found."""
parsed = urlparse(url)
if parsed.scheme == "" and parsed.netloc == "":
parsed = urlparse("//" + url)._replace(scheme="https")
@@ -143,6 +149,7 @@ template_name: str,
template_file: str | os.PathLike,
) -> None:
+ """Generate the spider module, based on the given template"""
assert self.settings is not None
tvars = self._generate_template_variables(module, name, url, template_name)
if self.settings.get("NEWSPIDER_MODULE"):
@@ -216,4 +223,4 @@ self.settings["TEMPLATES_DIR"] or Path(scrapy.__path__[0], "templates"),
"spiders",
)
- )+ )
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/commands/genspider.py |
Add verbose docstrings with examples |
from __future__ import annotations
import argparse
import builtins
import os
from abc import ABC, abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING, Any
from twisted.python import failure
from scrapy.exceptions import UsageError
from scrapy.utils.conf import arglist_to_dict, feed_process_params_from_cli
if TYPE_CHECKING:
from collections.abc import Iterable
from scrapy.crawler import Crawler, CrawlerProcessBase
from scrapy.settings import Settings
class ScrapyCommand(ABC):
requires_project: bool = False
requires_crawler_process: bool = True
crawler_process: CrawlerProcessBase | None = None # set in scrapy.cmdline
# default settings to be used for this command instead of global defaults
default_settings: dict[str, Any] = {}
exitcode: int = 0
def __init__(self) -> None:
self.settings: Settings | None = None # set in scrapy.cmdline
def set_crawler(self, crawler: Crawler) -> None:
if hasattr(self, "_crawler"):
raise RuntimeError("crawler already set")
self._crawler: Crawler = crawler
def syntax(self) -> str:
return ""
@abstractmethod
def short_desc(self) -> str:
return ""
def long_desc(self) -> str:
return self.short_desc()
def help(self) -> str:
return self.long_desc()
def add_options(self, parser: argparse.ArgumentParser) -> None:
assert self.settings is not None
group = parser.add_argument_group(title="Global Options")
group.add_argument(
"--logfile", metavar="FILE", help="log file. if omitted stderr will be used"
)
group.add_argument(
"-L",
"--loglevel",
metavar="LEVEL",
default=None,
help=f"log level (default: {self.settings['LOG_LEVEL']})",
)
group.add_argument(
"--nolog", action="store_true", help="disable logging completely"
)
group.add_argument(
"--profile",
metavar="FILE",
default=None,
help="write python cProfile stats to FILE",
)
group.add_argument("--pidfile", metavar="FILE", help="write process ID to FILE")
group.add_argument(
"-s",
"--set",
action="append",
default=[],
metavar="NAME=VALUE",
help="set/override setting (may be repeated)",
)
group.add_argument("--pdb", action="store_true", help="enable pdb on failure")
def process_options(self, args: list[str], opts: argparse.Namespace) -> None:
assert self.settings is not None
try:
self.settings.setdict(arglist_to_dict(opts.set), priority="cmdline")
except ValueError:
raise UsageError("Invalid -s value, use -s NAME=VALUE", print_help=False)
if opts.logfile:
self.settings.set("LOG_ENABLED", True, priority="cmdline")
self.settings.set("LOG_FILE", opts.logfile, priority="cmdline")
if opts.loglevel:
self.settings.set("LOG_ENABLED", True, priority="cmdline")
self.settings.set("LOG_LEVEL", opts.loglevel, priority="cmdline")
if opts.nolog:
self.settings.set("LOG_ENABLED", False, priority="cmdline")
if opts.pidfile:
Path(opts.pidfile).write_text(
str(os.getpid()) + os.linesep, encoding="utf-8"
)
if opts.pdb:
failure.startDebugMode()
@abstractmethod
def run(self, args: list[str], opts: argparse.Namespace) -> None:
raise NotImplementedError
class BaseRunSpiderCommand(ScrapyCommand):
def add_options(self, parser: argparse.ArgumentParser) -> None:
super().add_options(parser)
parser.add_argument(
"-a",
dest="spargs",
action="append",
default=[],
metavar="NAME=VALUE",
help="set spider argument (may be repeated)",
)
parser.add_argument(
"-o",
"--output",
metavar="FILE",
action="append",
help="append scraped items to the end of FILE (use - for stdout),"
" to define format set a colon at the end of the output URI (i.e. -o FILE:FORMAT)",
)
parser.add_argument(
"-O",
"--overwrite-output",
metavar="FILE",
action="append",
help="dump scraped items into FILE, overwriting any existing file,"
" to define format set a colon at the end of the output URI (i.e. -O FILE:FORMAT)",
)
def process_options(self, args: list[str], opts: argparse.Namespace) -> None:
super().process_options(args, opts)
try:
opts.spargs = arglist_to_dict(opts.spargs)
except ValueError:
raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)
if opts.output or opts.overwrite_output:
assert self.settings is not None
feeds = feed_process_params_from_cli(
self.settings,
opts.output,
overwrite_output=opts.overwrite_output,
)
self.settings.set("FEEDS", feeds, priority="cmdline")
class ScrapyHelpFormatter(argparse.HelpFormatter):
def __init__(
self,
prog: str,
indent_increment: int = 2,
max_help_position: int = 24,
width: int | None = None,
):
super().__init__(
prog,
indent_increment=indent_increment,
max_help_position=max_help_position,
width=width,
)
def _join_parts(self, part_strings: Iterable[str]) -> str:
# scrapy.commands.list shadows builtins.list
parts = self.format_part_strings(builtins.list(part_strings))
return super()._join_parts(parts)
def format_part_strings(self, part_strings: list[str]) -> list[str]:
if part_strings and part_strings[0].startswith("usage: "):
part_strings[0] = "Usage\n=====\n " + part_strings[0][len("usage: ") :]
headings = [
i for i in range(len(part_strings)) if part_strings[i].endswith(":\n")
]
for index in headings[::-1]:
char = "-" if "Global Options" in part_strings[index] else "="
part_strings[index] = part_strings[index][:-2].title()
underline = "".join(["\n", (char * len(part_strings[index])), "\n"])
part_strings.insert(index + 1, underline)
return part_strings | --- +++ @@ -1,3 +1,6 @@+"""
+Base class for Scrapy commands
+"""
from __future__ import annotations
@@ -39,19 +42,36 @@ self._crawler: Crawler = crawler
def syntax(self) -> str:
+ """
+ Command syntax (preferably one-line). Do not include command name.
+ """
return ""
@abstractmethod
def short_desc(self) -> str:
+ """
+ A short description of the command
+ """
return ""
def long_desc(self) -> str:
+ """A long description of the command. Return short description when not
+ available. It cannot contain newlines since contents will be formatted
+ by optparser which removes newlines and wraps text.
+ """
return self.short_desc()
def help(self) -> str:
+ """An extensive help for the command. It will be shown when using the
+ "help" command. It can contain newlines since no post-formatting will
+ be applied to its contents.
+ """
return self.long_desc()
def add_options(self, parser: argparse.ArgumentParser) -> None:
+ """
+ Populate option parse with options available for this command
+ """
assert self.settings is not None
group = parser.add_argument_group(title="Global Options")
group.add_argument(
@@ -112,10 +132,16 @@
@abstractmethod
def run(self, args: list[str], opts: argparse.Namespace) -> None:
+ """
+ Entry point for running commands
+ """
raise NotImplementedError
class BaseRunSpiderCommand(ScrapyCommand):
+ """
+ Common class used to share functionality between the crawl, parse and runspider commands
+ """
def add_options(self, parser: argparse.ArgumentParser) -> None:
super().add_options(parser)
@@ -161,6 +187,9 @@
class ScrapyHelpFormatter(argparse.HelpFormatter):
+ """
+ Help Formatter for scrapy command line help messages.
+ """
def __init__(
self,
@@ -182,6 +211,9 @@ return super()._join_parts(parts)
def format_part_strings(self, part_strings: list[str]) -> list[str]:
+ """
+ Underline and title case command line help message headers.
+ """
if part_strings and part_strings[0].startswith("usage: "):
part_strings[0] = "Usage\n=====\n " + part_strings[0][len("usage: ") :]
headings = [
@@ -192,4 +224,4 @@ part_strings[index] = part_strings[index][:-2].title()
underline = "".join(["\n", (char * len(part_strings[index])), "\n"])
part_strings.insert(index + 1, underline)
- return part_strings+ return part_strings
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/commands/__init__.py |
Document this script properly | from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
from scrapy.exceptions import NotConfigured
from scrapy.utils.conf import build_component_list
from scrapy.utils.misc import build_from_crawler, load_object
if TYPE_CHECKING:
from scrapy.crawler import Crawler
from scrapy.settings import BaseSettings, Settings
logger = logging.getLogger(__name__)
class AddonManager:
def __init__(self, crawler: Crawler) -> None:
self.crawler: Crawler = crawler
self.addons: list[Any] = []
def load_settings(self, settings: Settings) -> None:
for clspath in build_component_list(settings["ADDONS"]):
try:
addoncls = load_object(clspath)
addon = build_from_crawler(addoncls, self.crawler)
if hasattr(addon, "update_settings"):
addon.update_settings(settings)
self.addons.append(addon)
except NotConfigured as e:
if e.args:
logger.warning(
"Disabled %(clspath)s: %(eargs)s",
{"clspath": clspath, "eargs": e.args[0]},
extra={"crawler": self.crawler},
)
logger.info(
"Enabled addons:\n%(addons)s",
{
"addons": self.addons,
},
extra={"crawler": self.crawler},
)
@classmethod
def load_pre_crawler_settings(cls, settings: BaseSettings) -> None:
for clspath in build_component_list(settings["ADDONS"]):
addoncls = load_object(clspath)
if hasattr(addoncls, "update_pre_crawler_settings"):
addoncls.update_pre_crawler_settings(settings) | --- +++ @@ -16,12 +16,22 @@
class AddonManager:
+ """This class facilitates loading and storing :ref:`topics-addons`."""
def __init__(self, crawler: Crawler) -> None:
self.crawler: Crawler = crawler
self.addons: list[Any] = []
def load_settings(self, settings: Settings) -> None:
+ """Load add-ons and configurations from a settings object and apply them.
+
+ This will load the add-on for every add-on path in the
+ ``ADDONS`` setting and execute their ``update_settings`` methods.
+
+ :param settings: The :class:`~scrapy.settings.Settings` object from \
+ which to read the add-on configuration
+ :type settings: :class:`~scrapy.settings.Settings`
+ """
for clspath in build_component_list(settings["ADDONS"]):
try:
addoncls = load_object(clspath)
@@ -46,7 +56,17 @@
@classmethod
def load_pre_crawler_settings(cls, settings: BaseSettings) -> None:
+ """Update early settings that do not require a crawler instance, such as SPIDER_MODULES.
+
+ Similar to the load_settings method, this loads each add-on configured in the
+ ``ADDONS`` setting and calls their 'update_pre_crawler_settings' class method if present.
+ This method doesn't have access to the crawler instance or the addons list.
+
+ :param settings: The :class:`~scrapy.settings.BaseSettings` object from \
+ which to read the early add-on configuration
+ :type settings: :class:`~scrapy.settings.Settings`
+ """
for clspath in build_component_list(settings["ADDONS"]):
addoncls = load_object(clspath)
if hasattr(addoncls, "update_pre_crawler_settings"):
- addoncls.update_pre_crawler_settings(settings)+ addoncls.update_pre_crawler_settings(settings)
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/addons.py |
Add docstrings to incomplete code | from __future__ import annotations
import logging
import re
from typing import TYPE_CHECKING
from warnings import warn
from w3lib import html
from scrapy.exceptions import NotConfigured, ScrapyDeprecationWarning
from scrapy.http import HtmlResponse, Response
from scrapy.utils.url import escape_ajax
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request, Spider
from scrapy.crawler import Crawler
from scrapy.settings import BaseSettings
logger = logging.getLogger(__name__)
class AjaxCrawlMiddleware:
def __init__(self, settings: BaseSettings):
if not settings.getbool("AJAXCRAWL_ENABLED"):
raise NotConfigured
warn(
"scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware is deprecated"
" and will be removed in a future Scrapy version.",
ScrapyDeprecationWarning,
stacklevel=2,
)
# XXX: Google parses at least first 100k bytes; scrapy's redirect
# middleware parses first 4k. 4k turns out to be insufficient
# for this middleware, and parsing 100k could be slow.
# We use something in between (32K) by default.
self.lookup_bytes: int = settings.getint("AJAXCRAWL_MAXSIZE")
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
return cls(crawler.settings)
def process_response(
self, request: Request, response: Response, spider: Spider
) -> Request | Response:
if not isinstance(response, HtmlResponse) or response.status != 200:
return response
if request.method != "GET":
# other HTTP methods are either not safe or don't have a body
return response
if "ajax_crawlable" in request.meta: # prevent loops
return response
if not self._has_ajax_crawlable_variant(response):
return response
ajax_crawl_request = request.replace(url=escape_ajax(request.url + "#!"))
logger.debug(
"Downloading AJAX crawlable %(ajax_crawl_request)s instead of %(request)s",
{"ajax_crawl_request": ajax_crawl_request, "request": request},
extra={"spider": spider},
)
ajax_crawl_request.meta["ajax_crawlable"] = True
return ajax_crawl_request
def _has_ajax_crawlable_variant(self, response: Response) -> bool:
body = response.text[: self.lookup_bytes]
return _has_ajaxcrawlable_meta(body)
_ajax_crawlable_re: re.Pattern[str] = re.compile(
r'<meta\s+name=["\']fragment["\']\s+content=["\']!["\']/?>'
)
def _has_ajaxcrawlable_meta(text: str) -> bool:
# Stripping scripts and comments is slow (about 20x slower than
# just checking if a string is in text); this is a quick fail-fast
# path that should work for most pages.
if "fragment" not in text:
return False
if "content" not in text:
return False
text = html.remove_tags_with_content(text, ("script", "noscript"))
text = html.replace_entities(text)
text = html.remove_comments(text)
return _ajax_crawlable_re.search(text) is not None | --- +++ @@ -24,6 +24,9 @@
class AjaxCrawlMiddleware:
+ """
+ Handle 'AJAX crawlable' pages marked as crawlable via meta tag.
+ """
def __init__(self, settings: BaseSettings):
if not settings.getbool("AJAXCRAWL_ENABLED"):
@@ -73,6 +76,9 @@ return ajax_crawl_request
def _has_ajax_crawlable_variant(self, response: Response) -> bool:
+ """
+ Return True if a page without hash fragment could be "AJAX crawlable".
+ """
body = response.text[: self.lookup_bytes]
return _has_ajaxcrawlable_meta(body)
@@ -83,6 +89,16 @@
def _has_ajaxcrawlable_meta(text: str) -> bool:
+ """
+ >>> _has_ajaxcrawlable_meta('<html><head><meta name="fragment" content="!"/></head><body></body></html>')
+ True
+ >>> _has_ajaxcrawlable_meta("<html><head><meta name='fragment' content='!'></head></html>")
+ True
+ >>> _has_ajaxcrawlable_meta('<html><head><!--<meta name="fragment" content="!"/>--></head><body></body></html>')
+ False
+ >>> _has_ajaxcrawlable_meta('<html></html>')
+ False
+ """
# Stripping scripts and comments is slow (about 20x slower than
# just checking if a string is in text); this is a quick fail-fast
@@ -95,4 +111,4 @@ text = html.remove_tags_with_content(text, ("script", "noscript"))
text = html.replace_entities(text)
text = html.remove_comments(text)
- return _ajax_crawlable_re.search(text) is not None+ return _ajax_crawlable_re.search(text) is not None
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/downloadermiddlewares/ajaxcrawl.py |
Include argument descriptions in docstrings | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections.abc import Generator
from copy import deepcopy
from subprocess import PIPE, Popen, TimeoutExpired
from typing import TYPE_CHECKING, Any
from transformers.utils import is_torch_npu_available
from ..extras.constants import LLAMABOARD_CONFIG, MULTIMODAL_SUPPORTED_MODELS, PEFT_METHODS, TRAINING_STAGES
from ..extras.misc import is_accelerator_available, torch_gc
from ..extras.packages import is_gradio_available
from .common import (
DEFAULT_CACHE_DIR,
DEFAULT_CONFIG_DIR,
abort_process,
calculate_pixels,
gen_cmd,
get_save_dir,
load_args,
load_config,
load_eval_results,
save_args,
save_cmd,
)
from .control import get_trainer_info
from .locales import ALERTS, LOCALES
if is_gradio_available():
import gradio as gr
if TYPE_CHECKING:
from gradio.components import Component
from .manager import Manager
class Runner:
def __init__(self, manager: "Manager", demo_mode: bool = False) -> None:
self.manager = manager
self.demo_mode = demo_mode
""" Resume """
self.trainer: Popen | None = None
self.do_train = True
self.running_data: dict[Component, Any] = None
""" State """
self.aborted = False
self.running = False
def set_abort(self) -> None:
self.aborted = True
if self.trainer is not None:
abort_process(self.trainer.pid)
def _initialize(self, data: dict["Component", Any], do_train: bool, from_preview: bool) -> str:
get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)]
lang, model_name, model_path = get("top.lang"), get("top.model_name"), get("top.model_path")
dataset = get("train.dataset") if do_train else get("eval.dataset")
if self.running:
return ALERTS["err_conflict"][lang]
if not model_name:
return ALERTS["err_no_model"][lang]
if not model_path:
return ALERTS["err_no_path"][lang]
if not dataset:
return ALERTS["err_no_dataset"][lang]
if not from_preview and self.demo_mode:
return ALERTS["err_demo"][lang]
if do_train:
if not get("train.output_dir"):
return ALERTS["err_no_output_dir"][lang]
try:
json.loads(get("train.extra_args"))
except json.JSONDecodeError:
return ALERTS["err_json_schema"][lang]
stage = TRAINING_STAGES[get("train.training_stage")]
if stage == "ppo" and not get("train.reward_model"):
return ALERTS["err_no_reward_model"][lang]
else:
if not get("eval.output_dir"):
return ALERTS["err_no_output_dir"][lang]
if not from_preview and not is_accelerator_available():
gr.Warning(ALERTS["warn_no_cuda"][lang])
return ""
def _finalize(self, lang: str, finish_info: str) -> None:
finish_info = ALERTS["info_aborted"][lang] if self.aborted else finish_info
gr.Info(finish_info)
self.trainer = None
self.aborted = False
self.running = False
self.running_data = None
torch_gc()
def _parse_train_args(self, data: dict["Component", Any]) -> dict[str, Any]:
get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)]
model_name, finetuning_type = get("top.model_name"), get("top.finetuning_type")
user_config = load_config()
args = dict(
stage=TRAINING_STAGES[get("train.training_stage")],
do_train=True,
model_name_or_path=get("top.model_path"),
cache_dir=user_config.get("cache_dir", None),
preprocessing_num_workers=16,
finetuning_type=finetuning_type,
template=get("top.template"),
rope_scaling=get("top.rope_scaling") if get("top.rope_scaling") != "none" else None,
flash_attn="fa2" if get("top.booster") == "flashattn2" else "auto",
use_unsloth=(get("top.booster") == "unsloth"),
enable_liger_kernel=(get("top.booster") == "liger_kernel"),
dataset_dir=get("train.dataset_dir"),
dataset=",".join(get("train.dataset")),
cutoff_len=get("train.cutoff_len"),
learning_rate=float(get("train.learning_rate")),
num_train_epochs=float(get("train.num_train_epochs")),
max_samples=int(get("train.max_samples")),
per_device_train_batch_size=get("train.batch_size"),
gradient_accumulation_steps=get("train.gradient_accumulation_steps"),
lr_scheduler_type=get("train.lr_scheduler_type"),
max_grad_norm=float(get("train.max_grad_norm")),
logging_steps=get("train.logging_steps"),
save_steps=get("train.save_steps"),
warmup_steps=get("train.warmup_steps"),
neftune_noise_alpha=get("train.neftune_alpha") or None,
packing=get("train.packing") or get("train.neat_packing"),
neat_packing=get("train.neat_packing"),
train_on_prompt=get("train.train_on_prompt"),
mask_history=get("train.mask_history"),
resize_vocab=get("train.resize_vocab"),
use_llama_pro=get("train.use_llama_pro"),
enable_thinking=get("train.enable_thinking"),
report_to=get("train.report_to"),
use_galore=get("train.use_galore"),
use_apollo=get("train.use_apollo"),
use_badam=get("train.use_badam"),
use_swanlab=get("train.use_swanlab"),
output_dir=get_save_dir(model_name, finetuning_type, get("train.output_dir")),
fp16=(get("train.compute_type") == "fp16"),
bf16=(get("train.compute_type") == "bf16"),
pure_bf16=(get("train.compute_type") == "pure_bf16"),
plot_loss=True,
trust_remote_code=True,
ddp_timeout=180000000,
include_num_input_tokens_seen=True,
)
args.update(json.loads(get("train.extra_args")))
# checkpoints
if get("top.checkpoint_path"):
if finetuning_type in PEFT_METHODS: # list
args["adapter_name_or_path"] = ",".join(
[get_save_dir(model_name, finetuning_type, adapter) for adapter in get("top.checkpoint_path")]
)
else: # str
args["model_name_or_path"] = get_save_dir(model_name, finetuning_type, get("top.checkpoint_path"))
# quantization
if get("top.quantization_bit") != "none":
args["quantization_bit"] = int(get("top.quantization_bit"))
args["quantization_method"] = get("top.quantization_method")
args["double_quantization"] = not is_torch_npu_available()
# freeze config
if args["finetuning_type"] == "freeze":
args["freeze_trainable_layers"] = get("train.freeze_trainable_layers")
args["freeze_trainable_modules"] = get("train.freeze_trainable_modules")
args["freeze_extra_modules"] = get("train.freeze_extra_modules") or None
# lora config
if args["finetuning_type"] == "lora":
args["lora_rank"] = get("train.lora_rank")
args["lora_alpha"] = get("train.lora_alpha")
args["lora_dropout"] = get("train.lora_dropout")
args["loraplus_lr_ratio"] = get("train.loraplus_lr_ratio") or None
args["create_new_adapter"] = get("train.create_new_adapter")
args["use_rslora"] = get("train.use_rslora")
args["use_dora"] = get("train.use_dora")
args["pissa_init"] = get("train.use_pissa")
args["pissa_convert"] = get("train.use_pissa")
args["lora_target"] = get("train.lora_target") or "all"
args["additional_target"] = get("train.additional_target") or None
if args["use_llama_pro"]:
args["freeze_trainable_layers"] = get("train.freeze_trainable_layers")
# rlhf config
if args["stage"] == "ppo":
if finetuning_type in PEFT_METHODS:
args["reward_model"] = ",".join(
[get_save_dir(model_name, finetuning_type, adapter) for adapter in get("train.reward_model")]
)
else:
args["reward_model"] = get_save_dir(model_name, finetuning_type, get("train.reward_model"))
args["reward_model_type"] = "lora" if finetuning_type == "lora" else "full"
args["ppo_score_norm"] = get("train.ppo_score_norm")
args["ppo_whiten_rewards"] = get("train.ppo_whiten_rewards")
args["top_k"] = 0
args["top_p"] = 0.9
elif args["stage"] in ["dpo", "kto"]:
args["pref_beta"] = get("train.pref_beta")
args["pref_ftx"] = get("train.pref_ftx")
args["pref_loss"] = get("train.pref_loss")
# multimodal config
if model_name in MULTIMODAL_SUPPORTED_MODELS:
args["freeze_vision_tower"] = get("train.freeze_vision_tower")
args["freeze_multi_modal_projector"] = get("train.freeze_multi_modal_projector")
args["freeze_language_model"] = get("train.freeze_language_model")
args["image_max_pixels"] = calculate_pixels(get("train.image_max_pixels"))
args["image_min_pixels"] = calculate_pixels(get("train.image_min_pixels"))
args["video_max_pixels"] = calculate_pixels(get("train.video_max_pixels"))
args["video_min_pixels"] = calculate_pixels(get("train.video_min_pixels"))
# galore config
if args["use_galore"]:
args["galore_rank"] = get("train.galore_rank")
args["galore_update_interval"] = get("train.galore_update_interval")
args["galore_scale"] = get("train.galore_scale")
args["galore_target"] = get("train.galore_target")
# apollo config
if args["use_apollo"]:
args["apollo_rank"] = get("train.apollo_rank")
args["apollo_update_interval"] = get("train.apollo_update_interval")
args["apollo_scale"] = get("train.apollo_scale")
args["apollo_target"] = get("train.apollo_target")
# badam config
if args["use_badam"]:
args["badam_mode"] = get("train.badam_mode")
args["badam_switch_mode"] = get("train.badam_switch_mode")
args["badam_switch_interval"] = get("train.badam_switch_interval")
args["badam_update_ratio"] = get("train.badam_update_ratio")
# swanlab config
if get("train.use_swanlab"):
args["swanlab_project"] = get("train.swanlab_project")
args["swanlab_run_name"] = get("train.swanlab_run_name")
args["swanlab_workspace"] = get("train.swanlab_workspace")
args["swanlab_api_key"] = get("train.swanlab_api_key")
args["swanlab_mode"] = get("train.swanlab_mode")
# eval config
if get("train.val_size") > 1e-6 and args["stage"] != "ppo":
args["val_size"] = get("train.val_size")
args["eval_strategy"] = "steps"
args["eval_steps"] = args["save_steps"]
args["per_device_eval_batch_size"] = args["per_device_train_batch_size"]
# ds config
if get("train.ds_stage") != "none":
ds_stage = get("train.ds_stage")
ds_offload = "offload_" if get("train.ds_offload") else ""
args["deepspeed"] = os.path.join(DEFAULT_CACHE_DIR, f"ds_z{ds_stage}_{ds_offload}config.json")
return args
def _parse_eval_args(self, data: dict["Component", Any]) -> dict[str, Any]:
get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)]
model_name, finetuning_type = get("top.model_name"), get("top.finetuning_type")
user_config = load_config()
args = dict(
stage="sft",
model_name_or_path=get("top.model_path"),
cache_dir=user_config.get("cache_dir", None),
preprocessing_num_workers=16,
finetuning_type=finetuning_type,
quantization_method=get("top.quantization_method"),
template=get("top.template"),
rope_scaling=get("top.rope_scaling") if get("top.rope_scaling") != "none" else None,
flash_attn="fa2" if get("top.booster") == "flashattn2" else "auto",
use_unsloth=(get("top.booster") == "unsloth"),
dataset_dir=get("eval.dataset_dir"),
eval_dataset=",".join(get("eval.dataset")),
cutoff_len=get("eval.cutoff_len"),
max_samples=int(get("eval.max_samples")),
per_device_eval_batch_size=get("eval.batch_size"),
predict_with_generate=True,
report_to="none",
max_new_tokens=get("eval.max_new_tokens"),
top_p=get("eval.top_p"),
temperature=get("eval.temperature"),
output_dir=get_save_dir(model_name, finetuning_type, get("eval.output_dir")),
trust_remote_code=True,
ddp_timeout=180000000,
)
if get("eval.predict"):
args["do_predict"] = True
else:
args["do_eval"] = True
# checkpoints
if get("top.checkpoint_path"):
if finetuning_type in PEFT_METHODS: # list
args["adapter_name_or_path"] = ",".join(
[get_save_dir(model_name, finetuning_type, adapter) for adapter in get("top.checkpoint_path")]
)
else: # str
args["model_name_or_path"] = get_save_dir(model_name, finetuning_type, get("top.checkpoint_path"))
# quantization
if get("top.quantization_bit") != "none":
args["quantization_bit"] = int(get("top.quantization_bit"))
args["quantization_method"] = get("top.quantization_method")
args["double_quantization"] = not is_torch_npu_available()
return args
def _preview(self, data: dict["Component", Any], do_train: bool) -> Generator[dict["Component", str], None, None]:
output_box = self.manager.get_elem_by_id("{}.output_box".format("train" if do_train else "eval"))
error = self._initialize(data, do_train, from_preview=True)
if error:
gr.Warning(error)
yield {output_box: error}
else:
args = self._parse_train_args(data) if do_train else self._parse_eval_args(data)
yield {output_box: gen_cmd(args)}
def _launch(self, data: dict["Component", Any], do_train: bool) -> Generator[dict["Component", Any], None, None]:
output_box = self.manager.get_elem_by_id("{}.output_box".format("train" if do_train else "eval"))
error = self._initialize(data, do_train, from_preview=False)
if error:
gr.Warning(error)
yield {output_box: error}
else:
self.do_train, self.running_data = do_train, data
args = self._parse_train_args(data) if do_train else self._parse_eval_args(data)
os.makedirs(args["output_dir"], exist_ok=True)
save_args(os.path.join(args["output_dir"], LLAMABOARD_CONFIG), self._build_config_dict(data))
env = deepcopy(os.environ)
env["LLAMABOARD_ENABLED"] = "1"
env["LLAMABOARD_WORKDIR"] = args["output_dir"]
if args.get("deepspeed", None) is not None:
env["FORCE_TORCHRUN"] = "1"
# NOTE: DO NOT USE shell=True to avoid security risk
self.trainer = Popen(["llamafactory-cli", "train", save_cmd(args)], env=env, stderr=PIPE, text=True)
yield from self.monitor()
def _build_config_dict(self, data: dict["Component", Any]) -> dict[str, Any]:
config_dict = {}
skip_ids = ["top.lang", "top.model_path", "train.output_dir", "train.config_path"]
for elem, value in data.items():
elem_id = self.manager.get_id_by_elem(elem)
if elem_id not in skip_ids:
config_dict[elem_id] = value
return config_dict
def preview_train(self, data):
yield from self._preview(data, do_train=True)
def preview_eval(self, data):
yield from self._preview(data, do_train=False)
def run_train(self, data):
yield from self._launch(data, do_train=True)
def run_eval(self, data):
yield from self._launch(data, do_train=False)
def monitor(self):
self.aborted = False
self.running = True
get = lambda elem_id: self.running_data[self.manager.get_elem_by_id(elem_id)]
lang, model_name, finetuning_type = get("top.lang"), get("top.model_name"), get("top.finetuning_type")
output_dir = get("{}.output_dir".format("train" if self.do_train else "eval"))
output_path = get_save_dir(model_name, finetuning_type, output_dir)
output_box = self.manager.get_elem_by_id("{}.output_box".format("train" if self.do_train else "eval"))
progress_bar = self.manager.get_elem_by_id("{}.progress_bar".format("train" if self.do_train else "eval"))
loss_viewer = self.manager.get_elem_by_id("train.loss_viewer") if self.do_train else None
swanlab_link = self.manager.get_elem_by_id("train.swanlab_link") if self.do_train else None
running_log = ""
return_code = -1
while return_code == -1:
if self.aborted:
yield {
output_box: ALERTS["info_aborting"][lang],
progress_bar: gr.Slider(visible=False),
}
else:
running_log, running_progress, running_info = get_trainer_info(lang, output_path, self.do_train)
return_dict = {
output_box: running_log,
progress_bar: running_progress,
}
if "loss_viewer" in running_info:
return_dict[loss_viewer] = running_info["loss_viewer"]
if "swanlab_link" in running_info:
return_dict[swanlab_link] = running_info["swanlab_link"]
yield return_dict
try:
stderr = self.trainer.communicate(timeout=2)[1]
return_code = self.trainer.returncode
except TimeoutExpired:
continue
if return_code == 0 or self.aborted:
finish_info = ALERTS["info_finished"][lang]
if self.do_train:
finish_log = ALERTS["info_finished"][lang] + "\n\n" + running_log
else:
finish_log = load_eval_results(os.path.join(output_path, "all_results.json")) + "\n\n" + running_log
else:
print(stderr)
finish_info = ALERTS["err_failed"][lang]
finish_log = ALERTS["err_failed"][lang] + f" Exit code: {return_code}\n\n```\n{stderr}\n```\n"
self._finalize(lang, finish_info)
return_dict = {output_box: finish_log, progress_bar: gr.Slider(visible=False)}
yield return_dict
def save_args(self, data):
output_box = self.manager.get_elem_by_id("train.output_box")
error = self._initialize(data, do_train=True, from_preview=True)
if error:
gr.Warning(error)
return {output_box: error}
lang = data[self.manager.get_elem_by_id("top.lang")]
config_path = data[self.manager.get_elem_by_id("train.config_path")]
os.makedirs(DEFAULT_CONFIG_DIR, exist_ok=True)
save_path = os.path.join(DEFAULT_CONFIG_DIR, config_path)
save_args(save_path, self._build_config_dict(data))
return {output_box: ALERTS["info_config_saved"][lang] + save_path}
def load_args(self, lang: str, config_path: str):
output_box = self.manager.get_elem_by_id("train.output_box")
config_dict = load_args(os.path.join(DEFAULT_CONFIG_DIR, config_path))
if config_dict is None:
gr.Warning(ALERTS["err_config_not_found"][lang])
return {output_box: ALERTS["err_config_not_found"][lang]}
output_dict: dict[Component, Any] = {output_box: ALERTS["info_config_loaded"][lang]}
for elem_id, value in config_dict.items():
output_dict[self.manager.get_elem_by_id(elem_id)] = value
return output_dict
def check_output_dir(self, lang: str, model_name: str, finetuning_type: str, output_dir: str):
output_box = self.manager.get_elem_by_id("train.output_box")
output_dict: dict[Component, Any] = {output_box: LOCALES["output_box"][lang]["value"]}
if model_name and output_dir and os.path.isdir(get_save_dir(model_name, finetuning_type, output_dir)):
gr.Warning(ALERTS["warn_output_dir_exists"][lang])
output_dict[output_box] = ALERTS["warn_output_dir_exists"][lang]
output_dir = get_save_dir(model_name, finetuning_type, output_dir)
config_dict = load_args(os.path.join(output_dir, LLAMABOARD_CONFIG)) # load llamaboard config
for elem_id, value in config_dict.items():
output_dict[self.manager.get_elem_by_id(elem_id)] = value
return output_dict | --- +++ @@ -52,8 +52,10 @@
class Runner:
+ r"""A class to manage the running status of the trainers."""
def __init__(self, manager: "Manager", demo_mode: bool = False) -> None:
+ r"""Init a runner."""
self.manager = manager
self.demo_mode = demo_mode
""" Resume """
@@ -70,6 +72,7 @@ abort_process(self.trainer.pid)
def _initialize(self, data: dict["Component", Any], do_train: bool, from_preview: bool) -> str:
+ r"""Validate the configuration."""
get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)]
lang, model_name, model_path = get("top.lang"), get("top.model_name"), get("top.model_path")
dataset = get("train.dataset") if do_train else get("eval.dataset")
@@ -111,6 +114,7 @@ return ""
def _finalize(self, lang: str, finish_info: str) -> None:
+ r"""Clean the cached memory and resets the runner."""
finish_info = ALERTS["info_aborted"][lang] if self.aborted else finish_info
gr.Info(finish_info)
self.trainer = None
@@ -120,6 +124,7 @@ torch_gc()
def _parse_train_args(self, data: dict["Component", Any]) -> dict[str, Any]:
+ r"""Build and validate the training arguments."""
get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)]
model_name, finetuning_type = get("top.model_name"), get("top.finetuning_type")
user_config = load_config()
@@ -285,6 +290,7 @@ return args
def _parse_eval_args(self, data: dict["Component", Any]) -> dict[str, Any]:
+ r"""Build and validate the evaluation arguments."""
get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)]
model_name, finetuning_type = get("top.model_name"), get("top.finetuning_type")
user_config = load_config()
@@ -338,6 +344,7 @@ return args
def _preview(self, data: dict["Component", Any], do_train: bool) -> Generator[dict["Component", str], None, None]:
+ r"""Preview the training commands."""
output_box = self.manager.get_elem_by_id("{}.output_box".format("train" if do_train else "eval"))
error = self._initialize(data, do_train, from_preview=True)
if error:
@@ -348,6 +355,7 @@ yield {output_box: gen_cmd(args)}
def _launch(self, data: dict["Component", Any], do_train: bool) -> Generator[dict["Component", Any], None, None]:
+ r"""Start the training process."""
output_box = self.manager.get_elem_by_id("{}.output_box".format("train" if do_train else "eval"))
error = self._initialize(data, do_train, from_preview=False)
if error:
@@ -371,6 +379,7 @@ yield from self.monitor()
def _build_config_dict(self, data: dict["Component", Any]) -> dict[str, Any]:
+ r"""Build a dictionary containing the current training configuration."""
config_dict = {}
skip_ids = ["top.lang", "top.model_path", "train.output_dir", "train.config_path"]
for elem, value in data.items():
@@ -393,6 +402,7 @@ yield from self._launch(data, do_train=False)
def monitor(self):
+ r"""Monitorgit the training progress and logs."""
self.aborted = False
self.running = True
@@ -450,6 +460,7 @@ yield return_dict
def save_args(self, data):
+ r"""Save the training configuration to config path."""
output_box = self.manager.get_elem_by_id("train.output_box")
error = self._initialize(data, do_train=True, from_preview=True)
if error:
@@ -465,6 +476,7 @@ return {output_box: ALERTS["info_config_saved"][lang] + save_path}
def load_args(self, lang: str, config_path: str):
+ r"""Load the training configuration from config path."""
output_box = self.manager.get_elem_by_id("train.output_box")
config_dict = load_args(os.path.join(DEFAULT_CONFIG_DIR, config_path))
if config_dict is None:
@@ -478,6 +490,7 @@ return output_dict
def check_output_dir(self, lang: str, model_name: str, finetuning_type: str, output_dir: str):
+ r"""Restore the training status if output_dir exists."""
output_box = self.manager.get_elem_by_id("train.output_box")
output_dict: dict[Component, Any] = {output_box: LOCALES["output_box"][lang]["value"]}
if model_name and output_dir and os.path.isdir(get_save_dir(model_name, finetuning_type, output_dir)):
@@ -489,4 +502,4 @@ for elem_id, value in config_dict.items():
output_dict[self.manager.get_elem_by_id(elem_id)] = value
- return output_dict+ return output_dict
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/webui/runner.py |
Write clean docstrings for readability | from __future__ import annotations
import ipaddress
import itertools
import logging
from collections import deque
from typing import TYPE_CHECKING, Any, cast
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.errors import ErrorCodes
from h2.events import (
ConnectionTerminated,
DataReceived,
Event,
ResponseReceived,
SettingsAcknowledged,
StreamEnded,
StreamReset,
UnknownFrameReceived,
WindowUpdated,
)
from h2.exceptions import FrameTooLargeError, H2Error
from twisted.internet.interfaces import (
IAddress,
IHandshakeListener,
IProtocolNegotiationFactory,
)
from twisted.internet.protocol import Factory, Protocol, connectionDone
from twisted.internet.ssl import Certificate
from twisted.protocols.policies import TimeoutMixin
from zope.interface import implementer
from scrapy.core.http2.stream import Stream, StreamCloseReason
from scrapy.exceptions import DownloadTimeoutError
from scrapy.http import Request, Response
from scrapy.utils.deprecate import warn_on_deprecated_spider_attribute
if TYPE_CHECKING:
from ipaddress import IPv4Address, IPv6Address
from hpack import HeaderTuple
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.web.client import URI
from scrapy.settings import Settings
from scrapy.spiders import Spider
logger = logging.getLogger(__name__)
PROTOCOL_NAME = b"h2"
class InvalidNegotiatedProtocol(H2Error):
def __init__(self, negotiated_protocol: bytes) -> None:
self.negotiated_protocol = negotiated_protocol
def __str__(self) -> str:
return f"Expected {PROTOCOL_NAME!r}, received {self.negotiated_protocol!r}"
class RemoteTerminatedConnection(H2Error):
def __init__(
self,
remote_ip_address: IPv4Address | IPv6Address | None,
event: ConnectionTerminated,
) -> None:
self.remote_ip_address = remote_ip_address
self.terminate_event = event
def __str__(self) -> str:
return f"Received GOAWAY frame from {self.remote_ip_address!r}"
class MethodNotAllowed405(H2Error):
def __init__(self, remote_ip_address: IPv4Address | IPv6Address | None) -> None:
self.remote_ip_address = remote_ip_address
def __str__(self) -> str:
return f"Received 'HTTP/2.0 405 Method Not Allowed' from {self.remote_ip_address!r}"
@implementer(IHandshakeListener)
class H2ClientProtocol(Protocol, TimeoutMixin):
IDLE_TIMEOUT = 240
def __init__(
self,
uri: URI,
settings: Settings,
conn_lost_deferred: Deferred[list[BaseException]],
) -> None:
self._conn_lost_deferred: Deferred[list[BaseException]] = conn_lost_deferred
config = H2Configuration(client_side=True, header_encoding="utf-8")
self.conn = H2Connection(config=config)
# ID of the next request stream
# Following the convention - 'Streams initiated by a client MUST
# use odd-numbered stream identifiers' (RFC 7540 - Section 5.1.1)
self._stream_id_generator = itertools.count(start=1, step=2)
# Streams are stored in a dictionary keyed off their stream IDs
self.streams: dict[int, Stream] = {}
# If requests are received before connection is made we keep
# all requests in a pool and send them as the connection is made
self._pending_request_stream_pool: deque[Stream] = deque()
# Save an instance of errors raised which lead to losing the connection
# We pass these instances to the streams ResponseFailed() failure
self._conn_lost_errors: list[BaseException] = []
# Some meta data of this connection
# initialized when connection is successfully made
self.metadata: dict[str, Any] = {
# Peer certificate instance
"certificate": None,
# Address of the server we are connected to which
# is updated when HTTP/2 connection is made successfully
"ip_address": None,
# URI of the peer HTTP/2 connection is made
"uri": uri,
# Both ip_address and uri are used by the Stream before
# initiating the request to verify that the base address
# Variables taken from Project Settings
"default_download_maxsize": settings.getint("DOWNLOAD_MAXSIZE"),
"default_download_warnsize": settings.getint("DOWNLOAD_WARNSIZE"),
# Counter to keep track of opened streams. This counter
# is used to make sure that not more than MAX_CONCURRENT_STREAMS
# streams are opened which leads to ProtocolError
# We use simple FIFO policy to handle pending requests
"active_streams": 0,
# Flag to keep track if settings were acknowledged by the remote
# This ensures that we have established a HTTP/2 connection
"settings_acknowledged": False,
}
@property
def h2_connected(self) -> bool:
assert self.transport is not None # typing
return bool(self.transport.connected) and self.metadata["settings_acknowledged"]
@property
def allowed_max_concurrent_streams(self) -> int:
return min(
self.conn.local_settings.max_concurrent_streams,
self.conn.remote_settings.max_concurrent_streams,
)
def _send_pending_requests(self) -> None:
while (
self._pending_request_stream_pool
and self.metadata["active_streams"] < self.allowed_max_concurrent_streams
and self.h2_connected
):
self.metadata["active_streams"] += 1
stream = self._pending_request_stream_pool.popleft()
stream.initiate_request()
self._write_to_transport()
def pop_stream(self, stream_id: int) -> Stream:
stream = self.streams.pop(stream_id)
self.metadata["active_streams"] -= 1
self._send_pending_requests()
return stream
def _new_stream(self, request: Request, spider: Spider) -> Stream:
if hasattr(spider, "download_maxsize"): # pragma: no cover
warn_on_deprecated_spider_attribute("download_maxsize", "DOWNLOAD_MAXSIZE")
if hasattr(spider, "download_warnsize"): # pragma: no cover
warn_on_deprecated_spider_attribute(
"download_warnsize", "DOWNLOAD_WARNSIZE"
)
stream = Stream(
stream_id=next(self._stream_id_generator),
request=request,
protocol=self,
download_maxsize=getattr(
spider, "download_maxsize", self.metadata["default_download_maxsize"]
),
download_warnsize=getattr(
spider, "download_warnsize", self.metadata["default_download_warnsize"]
),
)
self.streams[stream.stream_id] = stream
return stream
def _write_to_transport(self) -> None:
assert self.transport is not None # typing
# Reset the idle timeout as connection is still actively sending data
self.resetTimeout()
data = self.conn.data_to_send()
self.transport.write(data)
def request(self, request: Request, spider: Spider) -> Deferred[Response]:
if not isinstance(request, Request):
raise TypeError(
f"Expected scrapy.http.Request, received {request.__class__.__qualname__}"
)
stream = self._new_stream(request, spider)
d: Deferred[Response] = stream.get_response()
# Add the stream to the request pool
self._pending_request_stream_pool.append(stream)
# If we receive a request when connection is idle
# We need to initiate pending requests
self._send_pending_requests()
return d
def connectionMade(self) -> None:
# Initialize the timeout
self.setTimeout(self.IDLE_TIMEOUT)
assert self.transport is not None # typing
destination = self.transport.getPeer()
self.metadata["ip_address"] = ipaddress.ip_address(destination.host)
# Initiate H2 Connection
self.conn.initiate_connection()
self._write_to_transport()
def _lose_connection_with_error(self, errors: list[BaseException]) -> None:
self._conn_lost_errors += errors
assert self.transport is not None # typing
self.transport.loseConnection()
def handshakeCompleted(self) -> None:
assert self.transport is not None # typing
if (
self.transport.negotiatedProtocol is not None
and self.transport.negotiatedProtocol != PROTOCOL_NAME
):
# we have not initiated the connection yet, no need to send a GOAWAY frame to the remote peer
self._lose_connection_with_error(
[InvalidNegotiatedProtocol(self.transport.negotiatedProtocol)]
)
def _check_received_data(self, data: bytes) -> None:
if data.startswith(b"HTTP/2.0 405 Method Not Allowed"):
raise MethodNotAllowed405(self.metadata["ip_address"])
def dataReceived(self, data: bytes) -> None:
# Reset the idle timeout as connection is still actively receiving data
self.resetTimeout()
try:
self._check_received_data(data)
events = self.conn.receive_data(data)
self._handle_events(events)
except H2Error as e:
if isinstance(e, FrameTooLargeError):
# hyper-h2 does not drop the connection in this scenario, we
# need to abort the connection manually.
self._conn_lost_errors += [e]
assert self.transport is not None # typing
self.transport.abortConnection()
return
# Save this error as ultimately the connection will be dropped
# internally by hyper-h2. Saved error will be passed to all the streams
# closed with the connection.
self._lose_connection_with_error([e])
finally:
self._write_to_transport()
def timeoutConnection(self) -> None:
# Check whether there are open streams. If there are, we're going to
# want to use the error code PROTOCOL_ERROR. If there aren't, use
# NO_ERROR.
if (
self.conn.open_outbound_streams > 0
or self.conn.open_inbound_streams > 0
or self.metadata["active_streams"] > 0
):
error_code = ErrorCodes.PROTOCOL_ERROR
else:
error_code = ErrorCodes.NO_ERROR
self.conn.close_connection(error_code=error_code)
self._write_to_transport()
self._lose_connection_with_error(
[
DownloadTimeoutError(
f"Connection was IDLE for more than {self.IDLE_TIMEOUT}s"
)
]
)
def connectionLost(self, reason: Failure = connectionDone) -> None:
# Cancel the timeout if not done yet
self.setTimeout(None)
# Notify the connection pool instance such that no new requests are
# sent over current connection
if not reason.check(connectionDone):
self._conn_lost_errors.append(reason)
self._conn_lost_deferred.callback(self._conn_lost_errors)
for stream in self.streams.values():
if stream.metadata["request_sent"]:
close_reason = StreamCloseReason.CONNECTION_LOST
else:
close_reason = StreamCloseReason.INACTIVE
stream.close(close_reason, self._conn_lost_errors, from_protocol=True)
self.metadata["active_streams"] -= len(self.streams)
self.streams.clear()
self._pending_request_stream_pool.clear()
self.conn.close_connection()
def _handle_events(self, events: list[Event]) -> None:
for event in events:
if isinstance(event, ConnectionTerminated):
self.connection_terminated(event)
elif isinstance(event, DataReceived):
self.data_received(event)
elif isinstance(event, ResponseReceived):
self.response_received(event)
elif isinstance(event, StreamEnded):
self.stream_ended(event)
elif isinstance(event, StreamReset):
self.stream_reset(event)
elif isinstance(event, WindowUpdated):
self.window_updated(event)
elif isinstance(event, SettingsAcknowledged):
self.settings_acknowledged(event)
elif isinstance(event, UnknownFrameReceived):
logger.warning("Unknown frame received: %s", event.frame)
# Event handler functions starts here
def connection_terminated(self, event: ConnectionTerminated) -> None:
self._lose_connection_with_error(
[RemoteTerminatedConnection(self.metadata["ip_address"], event)]
)
def data_received(self, event: DataReceived) -> None:
try:
stream = self.streams[event.stream_id]
except KeyError:
pass # We ignore server-initiated events
else:
stream.receive_data(event.data, event.flow_controlled_length)
def response_received(self, event: ResponseReceived) -> None:
try:
stream = self.streams[event.stream_id]
except KeyError:
pass # We ignore server-initiated events
else:
stream.receive_headers(cast("list[HeaderTuple]", event.headers))
def settings_acknowledged(self, event: SettingsAcknowledged) -> None:
self.metadata["settings_acknowledged"] = True
# Send off all the pending requests as now we have
# established a proper HTTP/2 connection
self._send_pending_requests()
# Update certificate when our HTTP/2 connection is established
assert self.transport is not None # typing
self.metadata["certificate"] = Certificate(self.transport.getPeerCertificate())
def stream_ended(self, event: StreamEnded) -> None:
try:
stream = self.pop_stream(event.stream_id)
except KeyError:
pass # We ignore server-initiated events
else:
stream.close(StreamCloseReason.ENDED, from_protocol=True)
def stream_reset(self, event: StreamReset) -> None:
try:
stream = self.pop_stream(event.stream_id)
except KeyError:
pass # We ignore server-initiated events
else:
stream.close(StreamCloseReason.RESET, from_protocol=True)
def window_updated(self, event: WindowUpdated) -> None:
if event.stream_id != 0:
self.streams[event.stream_id].receive_window_update()
else:
# Send leftover data for all the streams
for stream in self.streams.values():
stream.receive_window_update()
@implementer(IProtocolNegotiationFactory)
class H2ClientFactory(Factory):
def __init__(
self,
uri: URI,
settings: Settings,
conn_lost_deferred: Deferred[list[BaseException]],
) -> None:
self.uri = uri
self.settings = settings
self.conn_lost_deferred = conn_lost_deferred
def buildProtocol(self, addr: IAddress) -> H2ClientProtocol:
return H2ClientProtocol(self.uri, self.settings, self.conn_lost_deferred)
def acceptableProtocols(self) -> list[bytes]:
return [PROTOCOL_NAME] | --- +++ @@ -93,6 +93,15 @@ settings: Settings,
conn_lost_deferred: Deferred[list[BaseException]],
) -> None:
+ """
+ Arguments:
+ uri -- URI of the base url to which HTTP/2 Connection will be made.
+ uri is used to verify that incoming client requests have correct
+ base URL.
+ settings -- Scrapy project settings
+ conn_lost_deferred -- Deferred fires with the reason: Failure to notify
+ that connection was lost
+ """
self._conn_lost_deferred: Deferred[list[BaseException]] = conn_lost_deferred
config = H2Configuration(client_side=True, header_encoding="utf-8")
@@ -141,17 +150,30 @@
@property
def h2_connected(self) -> bool:
+ """Boolean to keep track of the connection status.
+ This is used while initiating pending streams to make sure
+ that we initiate stream only during active HTTP/2 Connection
+ """
assert self.transport is not None # typing
return bool(self.transport.connected) and self.metadata["settings_acknowledged"]
@property
def allowed_max_concurrent_streams(self) -> int:
+ """We keep total two streams for client (sending data) and
+ server side (receiving data) for a single request. To be safe
+ we choose the minimum. Since this value can change in event
+ RemoteSettingsChanged we make variable a property.
+ """
return min(
self.conn.local_settings.max_concurrent_streams,
self.conn.remote_settings.max_concurrent_streams,
)
def _send_pending_requests(self) -> None:
+ """Initiate all pending requests from the deque following FIFO
+ We make sure that at any time {allowed_max_concurrent_streams}
+ streams are active.
+ """
while (
self._pending_request_stream_pool
and self.metadata["active_streams"] < self.allowed_max_concurrent_streams
@@ -163,12 +185,14 @@ self._write_to_transport()
def pop_stream(self, stream_id: int) -> Stream:
+ """Perform cleanup when a stream is closed"""
stream = self.streams.pop(stream_id)
self.metadata["active_streams"] -= 1
self._send_pending_requests()
return stream
def _new_stream(self, request: Request, spider: Spider) -> Stream:
+ """Instantiates a new Stream object"""
if hasattr(spider, "download_maxsize"): # pragma: no cover
warn_on_deprecated_spider_attribute("download_maxsize", "DOWNLOAD_MAXSIZE")
if hasattr(spider, "download_warnsize"): # pragma: no cover
@@ -191,6 +215,9 @@ return stream
def _write_to_transport(self) -> None:
+ """Write data to the underlying transport connection
+ from the HTTP2 connection instance if any
+ """
assert self.transport is not None # typing
# Reset the idle timeout as connection is still actively sending data
self.resetTimeout()
@@ -216,6 +243,9 @@ return d
def connectionMade(self) -> None:
+ """Called by Twisted when the connection is established. We can start
+ sending some data now: we should open with the connection preamble.
+ """
# Initialize the timeout
self.setTimeout(self.IDLE_TIMEOUT)
@@ -228,11 +258,16 @@ self._write_to_transport()
def _lose_connection_with_error(self, errors: list[BaseException]) -> None:
+ """Helper function to lose the connection with the error sent as a
+ reason"""
self._conn_lost_errors += errors
assert self.transport is not None # typing
self.transport.loseConnection()
def handshakeCompleted(self) -> None:
+ """
+ Close the connection if it's not made via the expected protocol
+ """
assert self.transport is not None # typing
if (
self.transport.negotiatedProtocol is not None
@@ -244,6 +279,12 @@ )
def _check_received_data(self, data: bytes) -> None:
+ """Checks for edge cases where the connection to remote fails
+ without raising an appropriate H2Error
+
+ Arguments:
+ data -- Data received from the remote
+ """
if data.startswith(b"HTTP/2.0 405 Method Not Allowed"):
raise MethodNotAllowed405(self.metadata["ip_address"])
@@ -272,6 +313,8 @@ self._write_to_transport()
def timeoutConnection(self) -> None:
+ """Called when the connection times out.
+ We lose the connection with DownloadTimeoutError"""
# Check whether there are open streams. If there are, we're going to
# want to use the error code PROTOCOL_ERROR. If there aren't, use
@@ -296,6 +339,9 @@ )
def connectionLost(self, reason: Failure = connectionDone) -> None:
+ """Called by Twisted when the transport connection is lost.
+ No need to write anything to transport here.
+ """
# Cancel the timeout if not done yet
self.setTimeout(None)
@@ -319,6 +365,12 @@ self.conn.close_connection()
def _handle_events(self, events: list[Event]) -> None:
+ """Private method which acts as a bridge between the events
+ received from the HTTP/2 data and IH2EventsHandler
+
+ Arguments:
+ events -- A list of events that the remote peer triggered by sending data
+ """
for event in events:
if isinstance(event, ConnectionTerminated):
self.connection_terminated(event)
@@ -411,4 +463,4 @@ return H2ClientProtocol(self.uri, self.settings, self.conn_lost_deferred)
def acceptableProtocols(self) -> list[bytes]:
- return [PROTOCOL_NAME]+ return [PROTOCOL_NAME]
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/core/http2/protocol.py |
Create Google-style docstrings for my code | from __future__ import annotations
import logging
from enum import Enum
from io import BytesIO
from typing import TYPE_CHECKING, Any
from h2.errors import ErrorCodes
from h2.exceptions import H2Error, ProtocolError, StreamClosedError
from twisted.internet.defer import Deferred
from twisted.internet.error import ConnectionClosed
from twisted.python.failure import Failure
from twisted.web.client import ResponseFailed
from scrapy.exceptions import DownloadCancelledError
from scrapy.http.headers import Headers
from scrapy.utils._download_handlers import (
get_maxsize_msg,
get_warnsize_msg,
make_response,
)
from scrapy.utils.httpobj import urlparse_cached
if TYPE_CHECKING:
from hpack import HeaderTuple
from scrapy.core.http2.protocol import H2ClientProtocol
from scrapy.http import Request, Response
logger = logging.getLogger(__name__)
class InactiveStreamClosed(ConnectionClosed):
def __init__(self, request: Request) -> None:
self.request = request
def __str__(self) -> str:
return f"InactiveStreamClosed: Connection was closed without sending the request {self.request!r}"
class InvalidHostname(H2Error):
def __init__(
self, request: Request, expected_hostname: str, expected_netloc: str
) -> None:
self.request = request
self.expected_hostname = expected_hostname
self.expected_netloc = expected_netloc
def __str__(self) -> str:
return f"InvalidHostname: Expected {self.expected_hostname} or {self.expected_netloc} in {self.request}"
class StreamCloseReason(Enum):
# Received a StreamEnded event from the remote
ENDED = 1
# Received a StreamReset event -- ended abruptly
RESET = 2
# Transport connection was lost
CONNECTION_LOST = 3
# Expected response body size is more than allowed limit
MAXSIZE_EXCEEDED = 4
# Response deferred is cancelled by the client
# (happens when client called response_deferred.cancel())
CANCELLED = 5
# Connection lost and the stream was not initiated
INACTIVE = 6
# The hostname of the request is not same as of connected peer hostname
# As a result sending this request will the end the connection
INVALID_HOSTNAME = 7
# Actual response body size is more than allowed limit
MAXSIZE_EXCEEDED_ACTUAL = 8
class Stream:
def __init__(
self,
stream_id: int,
request: Request,
protocol: H2ClientProtocol,
download_maxsize: int = 0,
download_warnsize: int = 0,
) -> None:
self.stream_id: int = stream_id
self._request: Request = request
self._protocol: H2ClientProtocol = protocol
self._download_maxsize = self._request.meta.get(
"download_maxsize", download_maxsize
)
self._download_warnsize = self._request.meta.get(
"download_warnsize", download_warnsize
)
# Metadata of an HTTP/2 connection stream
# initialized when stream is instantiated
self.metadata: dict[str, Any] = {
"request_content_length": (
0 if self._request.body is None else len(self._request.body)
),
# Flag to keep track whether the stream has initiated the request
"request_sent": False,
# Flag to track whether we have logged about exceeding download warnsize
"reached_warnsize": False,
# Each time we send a data frame, we will decrease value by the amount send.
"remaining_content_length": (
0 if self._request.body is None else len(self._request.body)
),
# Flag to keep track whether client (self) have closed this stream
"stream_closed_local": False,
# Flag to keep track whether the server has closed the stream
"stream_closed_server": False,
}
# Private variable used to build the response
# this response is then converted to appropriate Response class
# passed to the response deferred callback
self._response: dict[str, Any] = {
# Data received frame by frame from the server is appended
# and passed to the response Deferred when completely received.
"body": BytesIO(),
# The amount of data received that counts against the
# flow control window
"flow_controlled_size": 0,
# Headers received after sending the request
"headers": Headers({}),
}
def _cancel(_: Any) -> None:
# Close this stream as gracefully as possible
# If the associated request is initiated we reset this stream
# else we directly call close() method
if self.metadata["request_sent"]:
self.reset_stream(StreamCloseReason.CANCELLED)
else:
self.close(StreamCloseReason.CANCELLED)
self._deferred_response: Deferred[Response] = Deferred(_cancel)
def __repr__(self) -> str:
return f"Stream(id={self.stream_id!r})"
@property
def _log_warnsize(self) -> bool:
content_length_header = int(
self._response["headers"].get(b"Content-Length", -1)
)
return (
self._download_warnsize
and (
self._response["flow_controlled_size"] > self._download_warnsize
or content_length_header > self._download_warnsize
)
and not self.metadata["reached_warnsize"]
)
def get_response(self) -> Deferred[Response]:
return self._deferred_response
def check_request_url(self) -> bool:
# Make sure that we are sending the request to the correct URL
url = urlparse_cached(self._request)
return (
url.netloc == str(self._protocol.metadata["uri"].host, "utf-8")
or url.netloc == str(self._protocol.metadata["uri"].netloc, "utf-8")
or url.netloc
== f"{self._protocol.metadata['ip_address']}:{self._protocol.metadata['uri'].port}"
)
def _get_request_headers(self) -> list[tuple[str, str]]:
url = urlparse_cached(self._request)
path = url.path
if url.query:
path += "?" + url.query
# This pseudo-header field MUST NOT be empty for "http" or "https"
# URIs; "http" or "https" URIs that do not contain a path component
# MUST include a value of '/'. The exception to this rule is an
# OPTIONS request for an "http" or "https" URI that does not include
# a path component; these MUST include a ":path" pseudo-header field
# with a value of '*' (refer RFC 7540 - Section 8.1.2.3)
if not path:
path = "*" if self._request.method == "OPTIONS" else "/"
# Make sure pseudo-headers comes before all the other headers
headers = [
(":method", self._request.method),
(":authority", url.netloc),
]
# The ":scheme" and ":path" pseudo-header fields MUST
# be omitted for CONNECT method (refer RFC 7540 - Section 8.3)
if self._request.method != "CONNECT":
headers += [
(":scheme", self._protocol.metadata["uri"].scheme),
(":path", path),
]
content_length = str(len(self._request.body))
headers.append(("Content-Length", content_length))
content_length_name = self._request.headers.normkey(b"Content-Length")
for name, values in self._request.headers.items():
for value_bytes in values:
value = str(value_bytes, "utf-8")
if name == content_length_name:
if value != content_length:
logger.warning(
"Ignoring bad Content-Length header %r of request %r, "
"sending %r instead",
value,
self._request,
content_length,
)
continue
headers.append((str(name, "utf-8"), value))
return headers
def initiate_request(self) -> None:
if self.check_request_url():
headers = self._get_request_headers()
self._protocol.conn.send_headers(self.stream_id, headers, end_stream=False)
self.metadata["request_sent"] = True
self.send_data()
else:
# Close this stream calling the response errback
# Note that we have not sent any headers
self.close(StreamCloseReason.INVALID_HOSTNAME)
def send_data(self) -> None:
if self.metadata["stream_closed_local"]:
raise StreamClosedError(self.stream_id)
# Firstly, check what the flow control window is for current stream.
window_size = self._protocol.conn.local_flow_control_window(
stream_id=self.stream_id
)
# Next, check what the maximum frame size is.
max_frame_size = self._protocol.conn.max_outbound_frame_size
# We will send no more than the window size or the remaining file size
# of data in this call, whichever is smaller.
bytes_to_send_size = min(window_size, self.metadata["remaining_content_length"])
# We now need to send a number of data frames.
while bytes_to_send_size > 0:
chunk_size = min(bytes_to_send_size, max_frame_size)
data_chunk_start_id = (
self.metadata["request_content_length"]
- self.metadata["remaining_content_length"]
)
data_chunk = self._request.body[
data_chunk_start_id : data_chunk_start_id + chunk_size
]
self._protocol.conn.send_data(self.stream_id, data_chunk, end_stream=False)
bytes_to_send_size -= chunk_size
self.metadata["remaining_content_length"] -= chunk_size
self.metadata["remaining_content_length"] = max(
0, self.metadata["remaining_content_length"]
)
# End the stream if no more data needs to be send
if self.metadata["remaining_content_length"] == 0:
self._protocol.conn.end_stream(self.stream_id)
# Q. What about the rest of the data?
# Ans: Remaining Data frames will be sent when we get a WindowUpdate frame
def receive_window_update(self) -> None:
    """The flow-control window size changed: resume sending body data
    that was previously blocked behind flow control."""
    meta = self.metadata
    if not meta["remaining_content_length"]:
        return
    if meta["stream_closed_server"] or not meta["request_sent"]:
        return
    self.send_data()
def receive_data(self, data: bytes, flow_controlled_length: int) -> None:
    """Handle a DATA frame: buffer the payload, enforce the maxsize /
    warnsize download limits, and acknowledge the received data so the
    peer's flow-control window is replenished.

    Arguments:
        data -- payload of the DATA frame
        flow_controlled_length -- flow-controlled size of the frame
            (what counts against the flow-control window)
    """
    self._response["body"].write(data)
    self._response["flow_controlled_size"] += flow_controlled_length
    # We check maxsize here in case the Content-Length header was not received
    if (
        self._download_maxsize
        and self._response["flow_controlled_size"] > self._download_maxsize
    ):
        self.reset_stream(StreamCloseReason.MAXSIZE_EXCEEDED_ACTUAL)
        return
    if self._log_warnsize:
        self.metadata["reached_warnsize"] = True
        warning_msg = get_warnsize_msg(
            self._response["flow_controlled_size"],
            self._download_warnsize,
            self._request,
            expected=False,
        )
        logger.warning(warning_msg)
    # Acknowledge the data received
    # NOTE(review): this acknowledges the cumulative flow_controlled_size
    # rather than this frame's flow_controlled_length — confirm intended.
    self._protocol.conn.acknowledge_received_data(
        self._response["flow_controlled_size"], self.stream_id
    )
def receive_headers(self, headers: list[HeaderTuple]) -> None:
    """Store the response HEADERS frame and enforce the *expected*
    download-size limits based on the Content-Length header.

    Arguments:
        headers -- (name, value) header tuples received from the peer
    """
    for name, value in headers:
        self._response["headers"].appendlist(name, value)
    # Check if we exceed the allowed max data size which can be received
    # (-1 means no Content-Length header was sent).
    expected_size = int(self._response["headers"].get(b"Content-Length", -1))
    if self._download_maxsize and expected_size > self._download_maxsize:
        self.reset_stream(StreamCloseReason.MAXSIZE_EXCEEDED)
        return
    if self._log_warnsize:
        self.metadata["reached_warnsize"] = True
        warning_msg = get_warnsize_msg(
            expected_size, self._download_warnsize, self._request, expected=True
        )
        logger.warning(warning_msg)
def reset_stream(self, reason: StreamCloseReason = StreamCloseReason.RESET) -> None:
    """Close this stream from our side by sending a RST_STREAM frame
    (REFUSED_STREAM) to the remote peer, then run close handling.

    Raises:
        StreamClosedError: if the stream was already closed locally.
    """
    if self.metadata["stream_closed_local"]:
        raise StreamClosedError(self.stream_id)
    # Clear buffer earlier to avoid keeping data in memory for a long time
    self._response["body"].truncate(0)
    self.metadata["stream_closed_local"] = True
    self._protocol.conn.reset_stream(self.stream_id, ErrorCodes.REFUSED_STREAM)
    self.close(reason)
def close(
    self,
    reason: StreamCloseReason,
    errors: list[BaseException] | None = None,
    from_protocol: bool = False,
) -> None:
    """Mark the stream closed by the server and fire the response
    deferred — callback or errback — according to *reason*.

    Arguments:
        reason -- why the stream is being closed
        errors -- underlying failures, used for the CONNECTION_LOST and
            INACTIVE reasons
        from_protocol -- True when called by the parent protocol, which
            has already removed this stream from its registry

    Raises:
        StreamClosedError: if already closed by the server.
        TypeError: if *reason* is not a StreamCloseReason.
    """
    if self.metadata["stream_closed_server"]:
        raise StreamClosedError(self.stream_id)
    if not isinstance(reason, StreamCloseReason):
        raise TypeError(
            f"Expected StreamCloseReason, received {reason.__class__.__qualname__}"
        )
    # Have default value of errors as an empty list as
    # some cases can add a list of exceptions
    errors = errors or []
    if not from_protocol:
        self._protocol.pop_stream(self.stream_id)
    self.metadata["stream_closed_server"] = True
    # We do not check for Content-Length or Transfer-Encoding in response headers
    # and add `partial` flag as in HTTP/1.1 as 'A request or response that includes
    # a payload body can include a content-length header field' (RFC 7540 - Section 8.1.2.6)
    # NOTE: Order of handling the events is important here
    # As we immediately cancel the request when maxsize is exceeded while
    # receiving DATA_FRAME's when we have received the headers (not
    # having Content-Length)
    if reason in {
        StreamCloseReason.MAXSIZE_EXCEEDED,
        StreamCloseReason.MAXSIZE_EXCEEDED_ACTUAL,
    }:
        # Fall back to the amount actually received when no
        # Content-Length header was present.
        expected_size = int(
            self._response["headers"].get(
                b"Content-Length", self._response["flow_controlled_size"]
            )
        )
        error_msg = get_maxsize_msg(
            expected_size,
            self._download_maxsize,
            self._request,
            expected=reason == StreamCloseReason.MAXSIZE_EXCEEDED,
        )
        logger.error(error_msg)
        self._deferred_response.errback(DownloadCancelledError(error_msg))
    elif reason is StreamCloseReason.ENDED:
        # Normal completion: build and deliver the response.
        self._fire_response_deferred()
    # Stream was abruptly ended here
    elif reason is StreamCloseReason.CANCELLED:
        # Client has cancelled the request. Remove all the data
        # received and fire the response deferred with no flags set
        # NOTE: The data is already flushed in Stream.reset_stream() called
        # immediately when the stream needs to be cancelled
        # There maybe no :status in headers, we make
        # HTTP Status Code: 499 - Client Closed Request
        self._response["headers"][":status"] = "499"
        self._fire_response_deferred()
    elif reason is StreamCloseReason.RESET:
        self._deferred_response.errback(
            ResponseFailed(
                [
                    Failure(
                        f"Remote peer {self._protocol.metadata['ip_address']} sent RST_STREAM",
                        ProtocolError,
                    )
                ]
            )
        )
    elif reason is StreamCloseReason.CONNECTION_LOST:
        self._deferred_response.errback(ResponseFailed(errors))
    elif reason is StreamCloseReason.INACTIVE:
        errors.insert(0, InactiveStreamClosed(self._request))
        self._deferred_response.errback(ResponseFailed(errors))
    else:
        # Only remaining reason: the request hostname did not match the
        # connection's certificate/authority.
        assert reason is StreamCloseReason.INVALID_HOSTNAME
        self._deferred_response.errback(
            InvalidHostname(
                self._request,
                str(self._protocol.metadata["uri"].host, "utf-8"),
                f"{self._protocol.metadata['ip_address']}:{self._protocol.metadata['uri'].port}",
            )
        )
def _fire_response_deferred(self) -> None:
    """Build a Response from the accumulated ``self._response`` data and
    fire the response deferred's callback with it."""
    response_headers = self._response["headers"]
    protocol_meta = self._protocol.metadata
    built_response = make_response(
        url=self._request.url,
        status=int(response_headers[":status"]),
        headers=response_headers,
        body=self._response["body"].getvalue(),
        certificate=protocol_meta["certificate"],
        ip_address=protocol_meta["ip_address"],
        protocol="h2",
    )
    self._deferred_response.callback(built_response)
class InactiveStreamClosed(ConnectionClosed):
+ """Connection was closed without sending request headers
+ of the stream. This happens when a stream is waiting for other
+ streams to close and connection is lost."""
def __init__(self, request: Request) -> None:
self.request = request
@@ -81,6 +84,15 @@
class Stream:
+ """Represents a single HTTP/2 Stream.
+
+ Stream is a bidirectional flow of bytes within an established connection,
+ which may carry one or more messages. Handles the transfer of HTTP Headers
+ and Data frames.
+
+ Role of this class is to
+ 1. Combine all the data frames
+ """
def __init__(
self,
@@ -90,6 +102,12 @@ download_maxsize: int = 0,
download_warnsize: int = 0,
) -> None:
+ """
+ Arguments:
+ stream_id -- Unique identifier for the stream within a single HTTP/2 connection
+ request -- The HTTP request associated to the stream
+ protocol -- Parent H2ClientProtocol instance
+ """
self.stream_id: int = stream_id
self._request: Request = request
self._protocol: H2ClientProtocol = protocol
@@ -151,6 +169,13 @@
@property
def _log_warnsize(self) -> bool:
+ """Checks if we have received data which exceeds the download warnsize
+ and whether we have not already logged about it.
+
+ Returns:
+ True if both the above conditions hold true
+ False if any of the conditions is false
+ """
content_length_header = int(
self._response["headers"].get(b"Content-Length", -1)
)
@@ -164,6 +189,9 @@ )
def get_response(self) -> Deferred[Response]:
+ """Simply return a Deferred which fires when response
+ from the asynchronous request is available
+ """
return self._deferred_response
def check_request_url(self) -> bool:
@@ -239,6 +267,16 @@ self.close(StreamCloseReason.INVALID_HOSTNAME)
def send_data(self) -> None:
+ """Called immediately after the headers are sent. Here we send all the
+ data as part of the request.
+
+ If the content length is 0 initially then we end the stream immediately and
+ wait for response data.
+
+ Warning: Only call this method when stream not closed from client side
+ and has initiated request already by sending HEADER frame. If not then
+ stream will raise ProtocolError (raise by h2 state machine).
+ """
if self.metadata["stream_closed_local"]:
raise StreamClosedError(self.stream_id)
@@ -283,6 +321,10 @@ # Ans: Remaining Data frames will be sent when we get a WindowUpdate frame
def receive_window_update(self) -> None:
+ """Flow control window size was changed.
+ Send data that earlier could not be sent as we were
+ blocked behind the flow control.
+ """
if (
self.metadata["remaining_content_length"]
and not self.metadata["stream_closed_server"]
@@ -335,6 +377,7 @@ logger.warning(warning_msg)
def reset_stream(self, reason: StreamCloseReason = StreamCloseReason.RESET) -> None:
+ """Close this stream by sending a RST_FRAME to the remote peer"""
if self.metadata["stream_closed_local"]:
raise StreamClosedError(self.stream_id)
@@ -351,6 +394,7 @@ errors: list[BaseException] | None = None,
from_protocol: bool = False,
) -> None:
+ """Based on the reason sent we will handle each case."""
if self.metadata["stream_closed_server"]:
raise StreamClosedError(self.stream_id)
@@ -440,6 +484,9 @@ )
def _fire_response_deferred(self) -> None:
+ """Builds response from the self._response dict
+ and fires the response deferred callback with the
+ generated response instance"""
response = make_response(
url=self._request.url,
@@ -450,4 +497,4 @@ ip_address=self._protocol.metadata["ip_address"],
protocol="h2",
)
- self._deferred_response.callback(response)+ self._deferred_response.callback(response)
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/core/http2/stream.py |
Add inline docstrings for readability |
from __future__ import annotations
import warnings
from time import time
from typing import TYPE_CHECKING
from urllib.parse import urldefrag, urlparse, urlunparse
from twisted.internet import defer
from twisted.internet.protocol import ClientFactory
from twisted.web.http import HTTPClient
from scrapy.exceptions import DownloadTimeoutError, ScrapyDeprecationWarning
from scrapy.http import Headers, Response
from scrapy.responsetypes import responsetypes
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_bytes, to_unicode
if TYPE_CHECKING:
from scrapy import Request
class ScrapyHTTPPageGetter(HTTPClient):
    """Deprecated HTTP/1.0 client protocol: sends the request stored on
    its factory and reports status, headers and body back to it."""

    delimiter = b"\n"

    def __init__(self):
        warnings.warn(
            "ScrapyHTTPPageGetter is deprecated and will be removed in a future Scrapy version.",
            category=ScrapyDeprecationWarning,
            stacklevel=2,
        )
        super().__init__()

    def connectionMade(self):
        """Send the full request (command, headers, body) as soon as the
        connection is established."""
        self.headers = Headers()  # bucket for response headers
        # Method command
        self.sendCommand(self.factory.method, self.factory.path)
        # Headers
        for key, values in self.factory.headers.items():
            for value in values:
                self.sendHeader(key, value)
        self.endHeaders()
        # Body
        if self.factory.body is not None:
            self.transport.write(self.factory.body)

    def lineReceived(self, line):
        # Strip trailing whitespace before the base class parses the line.
        return HTTPClient.lineReceived(self, line.rstrip())

    def handleHeader(self, key, value):
        # Keep every occurrence of repeated headers.
        self.headers.appendlist(key, value)

    def handleStatus(self, version, status, message):
        # Forward the status line to the factory.
        self.factory.gotStatus(version, status, message)

    def handleEndHeaders(self):
        # All headers received; hand them to the factory.
        self.factory.gotHeaders(self.headers)

    def connectionLost(self, reason):
        """Remember why the connection was lost and notify the factory."""
        self._connection_lost_reason = reason
        HTTPClient.connectionLost(self, reason)
        self.factory.noPage(reason)

    def handleResponse(self, response):
        """Deliver the response body to the factory and close the
        connection.

        A HEAD response always gets an empty body; a non-zero remaining
        length means the body is incomplete, so the connection-lost
        reason is reported instead.
        """
        if self.factory.method.upper() == b"HEAD":
            self.factory.page(b"")
        elif self.length is not None and self.length > 0:
            self.factory.noPage(self._connection_lost_reason)
        else:
            self.factory.page(response)
        self.transport.loseConnection()

    def timeout(self):
        """Abort the download because it exceeded the configured timeout."""
        self.transport.loseConnection()
        # transport cleanup needed for HTTPS connections
        if self.factory.url.startswith(b"https"):
            self.transport.stopProducing()
        self.factory.noPage(
            DownloadTimeoutError(
                f"Getting {self.factory.url} took longer "
                f"than {self.factory.timeout} seconds."
            )
        )
# This class used to inherit from Twisted’s
# twisted.web.client.HTTPClientFactory. When that class was deprecated in
# Twisted (https://github.com/twisted/twisted/pull/643), we merged its
# non-overridden code into this class.
class ScrapyHTTPClientFactory(ClientFactory):
    """Deprecated factory that builds a ScrapyHTTPPageGetter for a single
    request and exposes ``deferred``, which fires with the built Response
    (or the failure reason)."""

    protocol = ScrapyHTTPPageGetter

    # 1 while no result (page/noPage) has been delivered yet.
    waiting = 1
    noisy = False
    followRedirect = False
    afterFoundGet = False

    def _build_response(self, body, request):
        """Build the final Response from the collected status, headers
        and *body*, recording download latency in the request meta."""
        request.meta["download_latency"] = self.headers_time - self.start_time
        status = int(self.status)
        headers = Headers(self.response_headers)
        respcls = responsetypes.from_args(headers=headers, url=self._url, body=body)
        return respcls(
            url=self._url,
            status=status,
            headers=headers,
            body=body,
            protocol=to_unicode(self.version),
        )

    def _set_connection_attributes(self, request):
        """Derive scheme/host/port/netloc/path for the connection, going
        through the proxy from ``request.meta`` when one is set."""
        proxy = request.meta.get("proxy")
        if proxy:
            proxy_parsed = urlparse(to_bytes(proxy, encoding="ascii"))
            self.scheme = proxy_parsed.scheme
            self.host = proxy_parsed.hostname
            self.port = proxy_parsed.port
            self.netloc = proxy_parsed.netloc
            if self.port is None:
                self.port = 443 if proxy_parsed.scheme == b"https" else 80
            # With a proxy, the full URL is sent as the request path.
            self.path = self.url
        else:
            parsed = urlparse_cached(request)
            path_str = urlunparse(
                ("", "", parsed.path or "/", parsed.params, parsed.query, "")
            )
            self.path = to_bytes(path_str, encoding="ascii")
            assert parsed.hostname is not None
            self.host = to_bytes(parsed.hostname, encoding="ascii")
            self.port = parsed.port
            self.scheme = to_bytes(parsed.scheme, encoding="ascii")
            self.netloc = to_bytes(parsed.netloc, encoding="ascii")
            if self.port is None:
                self.port = 443 if self.scheme == b"https" else 80

    def __init__(self, request: Request, timeout: float = 180):
        """
        Arguments:
            request -- the request to download
            timeout -- fallback timeout in seconds, used when the request
                meta does not provide ``download_timeout``
        """
        warnings.warn(
            "ScrapyHTTPClientFactory is deprecated and will be removed in a future Scrapy version.",
            category=ScrapyDeprecationWarning,
            stacklevel=2,
        )
        self._url: str = urldefrag(request.url)[0]
        # converting to bytes to comply to Twisted interface
        self.url: bytes = to_bytes(self._url, encoding="ascii")
        self.method: bytes = to_bytes(request.method, encoding="ascii")
        self.body: bytes | None = request.body or None
        self.headers: Headers = Headers(request.headers)
        self.response_headers: Headers | None = None
        self.timeout: float = request.meta.get("download_timeout") or timeout
        self.start_time: float = time()
        self.deferred: defer.Deferred[Response] = defer.Deferred().addCallback(
            self._build_response, request
        )
        # Fixes Twisted 11.1.0+ support as HTTPClientFactory is expected
        # to have _disconnectedDeferred. See Twisted r32329.
        # As Scrapy implements it's own logic to handle redirects is not
        # needed to add the callback _waitForDisconnect.
        # Specifically this avoids the AttributeError exception when
        # clientConnectionFailed method is called.
        self._disconnectedDeferred: defer.Deferred[None] = defer.Deferred()
        self._set_connection_attributes(request)
        # set Host header based on url
        self.headers.setdefault("Host", self.netloc)
        # set Content-Length based len of body
        if self.body is not None:
            self.headers["Content-Length"] = len(self.body)
            # just in case a broken http/1.1 decides to keep connection alive
            self.headers.setdefault("Connection", "close")
        # Content-Length must be specified in POST method even with no body
        elif self.method == b"POST":
            self.headers["Content-Length"] = 0

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: {self._url}>"

    def _cancelTimeout(self, result, timeoutCall):
        # Passthrough callback: cancel the pending timeout, if still active.
        if timeoutCall.active():
            timeoutCall.cancel()
        return result

    def buildProtocol(self, addr):
        """Create the protocol instance and schedule its timeout call."""
        p = ClientFactory.buildProtocol(self, addr)
        p.followRedirect = self.followRedirect
        p.afterFoundGet = self.afterFoundGet
        if self.timeout:
            from twisted.internet import reactor

            timeoutCall = reactor.callLater(self.timeout, p.timeout)
            self.deferred.addBoth(self._cancelTimeout, timeoutCall)
        return p

    def gotHeaders(self, headers):
        # Record when headers arrived (used to compute download_latency).
        self.headers_time = time()
        self.response_headers = headers

    def gotStatus(self, version, status, message):
        # Store the HTTP status-line components.
        self.version, self.status, self.message = version, status, message

    def page(self, page):
        """Deliver the downloaded body; only the first result counts."""
        if self.waiting:
            self.waiting = 0
            self.deferred.callback(page)

    def noPage(self, reason):
        """Deliver a failure; only the first result counts."""
        if self.waiting:
            self.waiting = 0
            self.deferred.errback(reason)

    def clientConnectionFailed(self, _, reason):
        """Report a connection-attempt failure as the request's error
        result."""
        if self.waiting:
            self.waiting = 0
            # If the connection attempt failed, there is nothing more to
            # disconnect, so just fire that Deferred now.
            self._disconnectedDeferred.callback(None)
            self.deferred.errback(reason)
from __future__ import annotations
@@ -202,6 +203,16 @@ self.response_headers = headers
def gotStatus(self, version, status, message):
+ """
+ Set the status of the request on us.
+ @param version: The HTTP version.
+ @type version: L{bytes}
+ @param status: The HTTP status code, an integer represented as a
+ bytestring.
+ @type status: L{bytes}
+ @param message: The HTTP status message.
+ @type message: L{bytes}
+ """
self.version, self.status, self.message = version, status, message
def page(self, page):
@@ -215,9 +226,14 @@ self.deferred.errback(reason)
def clientConnectionFailed(self, _, reason):
+ """
+ When a connection attempt fails, the request cannot be issued. If no
+ result has yet been provided to the result Deferred, provide the
+ connection failure reason as an error result.
+ """
if self.waiting:
self.waiting = 0
# If the connection attempt failed, there is nothing more to
# disconnect, so just fire that Deferred now.
self._disconnectedDeferred.callback(None)
- self.deferred.errback(reason)+ self.deferred.errback(reason)
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/core/downloader/webclient.py |
Generate docstrings for script automation |
from __future__ import annotations
import ipaddress
import logging
import ssl
from http.cookiejar import Cookie, CookieJar
from io import BytesIO
from typing import TYPE_CHECKING, Any, NoReturn, TypedDict
import httpx
from scrapy import Request, signals
from scrapy.exceptions import (
CannotResolveHostError,
DownloadCancelledError,
DownloadConnectionRefusedError,
DownloadFailedError,
DownloadTimeoutError,
NotConfigured,
ResponseDataLossError,
UnsupportedURLSchemeError,
)
from scrapy.http import Headers, Response
from scrapy.utils._download_handlers import (
BaseHttpDownloadHandler,
check_stop_download,
get_dataloss_msg,
get_maxsize_msg,
get_warnsize_msg,
make_response,
)
from scrapy.utils.asyncio import is_asyncio_available
from scrapy.utils.ssl import _log_sslobj_debug_info, _make_ssl_context
if TYPE_CHECKING:
from contextlib import AbstractAsyncContextManager
from http.client import HTTPResponse
from ipaddress import IPv4Address, IPv6Address
from urllib.request import Request as ULRequest
from httpcore import AsyncNetworkStream
from scrapy.crawler import Crawler
logger = logging.getLogger(__name__)
class _BaseResponseArgs(TypedDict):
    """Keyword arguments shared by every make_response() call in this
    handler, regardless of how the body was obtained."""

    status: int
    url: str
    headers: Headers
    ip_address: IPv4Address | IPv6Address
    protocol: str
# workaround for (and from) https://github.com/encode/httpx/issues/2992
class _NullCookieJar(CookieJar):  # pragma: no cover
    """A CookieJar that stores nothing, leaving cookie handling to the
    caller rather than to httpx."""

    def extract_cookies(self, response: HTTPResponse, request: ULRequest) -> None:
        # Ignore all Set-Cookie headers.
        pass

    def set_cookie(self, cookie: Cookie) -> None:
        # Reject every cookie.
        pass
class HttpxDownloadHandler(BaseHttpDownloadHandler):
    """Experimental HTTP(S) download handler built on ``httpx.AsyncClient``.

    Requires asyncio support (asyncio Twisted reactor, or Twisted
    disabled); raises NotConfigured otherwise.
    """

    # Fallback timeout (seconds) when the request meta has no download_timeout.
    _DEFAULT_CONNECT_TIMEOUT = 10

    def __init__(self, crawler: Crawler):
        # we don't run extra-deps tests with the non-asyncio reactor
        if not is_asyncio_available():  # pragma: no cover
            raise NotConfigured(
                f"{type(self).__name__} requires the asyncio support. Make"
                f" sure that you have either enabled the asyncio Twisted"
                f" reactor in the TWISTED_REACTOR setting or disabled the"
                f" TWISTED_ENABLED setting. See the asyncio documentation"
                f" of Scrapy for more information."
            )
        super().__init__(crawler)
        logger.warning(
            "HttpxDownloadHandler is experimental and is not recommented for production use."
        )
        self._tls_verbose_logging: bool = self.crawler.settings.getbool(
            "DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING"
        )
        # _NullCookieJar keeps httpx from doing its own cookie handling.
        self._client = httpx.AsyncClient(
            verify=_make_ssl_context(crawler.settings), cookies=_NullCookieJar()
        )

    async def download_request(self, request: Request) -> Response:
        """Download *request* and return the Response.

        Translates httpx exceptions into the corresponding Scrapy
        download exceptions (timeout, unsupported scheme, DNS failure,
        connection refused, generic network/protocol failures).
        """
        self._warn_unsupported_meta(request.meta)
        timeout: float = request.meta.get(
            "download_timeout", self._DEFAULT_CONNECT_TIMEOUT
        )
        try:
            async with self._get_httpx_response(request, timeout) as httpx_response:
                return await self._read_response(httpx_response, request)
        except httpx.TimeoutException as e:
            raise DownloadTimeoutError(
                f"Getting {request.url} took longer than {timeout} seconds."
            ) from e
        except httpx.UnsupportedProtocol as e:
            raise UnsupportedURLSchemeError(str(e)) from e
        except httpx.ConnectError as e:
            # Distinguish DNS-resolution failures from refused connections
            # by inspecting the error message.
            if "Name or service not known" in str(e) or "getaddrinfo failed" in str(e):
                raise CannotResolveHostError(str(e)) from e
            raise DownloadConnectionRefusedError(str(e)) from e
        except httpx.NetworkError as e:
            raise DownloadFailedError(str(e)) from e
        except httpx.RemoteProtocolError as e:
            raise DownloadFailedError(str(e)) from e

    def _warn_unsupported_meta(self, meta: dict[str, Any]) -> None:
        """Log errors for request meta keys this handler cannot honor."""
        if meta.get("bindaddress"):
            # configurable only per-client:
            # https://github.com/encode/httpx/issues/755#issuecomment-2746121794
            logger.error(
                f"The 'bindaddress' request meta key is not supported by"
                f" {type(self).__name__} and will be ignored."
            )
        if meta.get("proxy"):
            # configurable only per-client:
            # https://github.com/encode/httpx/issues/486
            logger.error(
                f"The 'proxy' request meta key is not supported by"
                f" {type(self).__name__} and will be ignored."
            )

    def _get_httpx_response(
        self, request: Request, timeout: float
    ) -> AbstractAsyncContextManager[httpx.Response]:
        """Return the async context manager for a streamed httpx request."""
        return self._client.stream(
            request.method,
            request.url,
            content=request.body,
            headers=request.headers.to_tuple_list(),
            timeout=timeout,
        )

    async def _read_response(
        self, httpx_response: httpx.Response, request: Request
    ) -> Response:
        """Read the body in chunks while enforcing maxsize/warnsize
        limits, firing headers_received/bytes_received signals (which may
        stop the download early), and honoring download_fail_on_dataloss
        for truncated bodies.
        """
        maxsize: int = request.meta.get("download_maxsize", self._default_maxsize)
        warnsize: int = request.meta.get("download_warnsize", self._default_warnsize)
        content_length = httpx_response.headers.get("Content-Length")
        expected_size = int(content_length) if content_length is not None else None
        if maxsize and expected_size and expected_size > maxsize:
            self._cancel_maxsize(expected_size, maxsize, request, expected=True)
        reached_warnsize = False
        if warnsize and expected_size and expected_size > warnsize:
            reached_warnsize = True
            logger.warning(
                get_warnsize_msg(expected_size, warnsize, request, expected=True)
            )
        headers = Headers(httpx_response.headers.multi_items())
        network_stream: AsyncNetworkStream = httpx_response.extensions["network_stream"]
        make_response_base_args: _BaseResponseArgs = {
            "status": httpx_response.status_code,
            "url": request.url,
            "headers": headers,
            "ip_address": self._get_server_ip(network_stream),
            "protocol": httpx_response.http_version,
        }
        self._log_tls_info(network_stream)
        if stop_download := check_stop_download(
            signals.headers_received,
            self.crawler,
            request,
            headers=headers,
            body_length=expected_size,
        ):
            # A headers_received handler stopped the download: empty body.
            return make_response(
                **make_response_base_args,
                stop_download=stop_download,
            )
        response_body = BytesIO()
        bytes_received = 0
        try:
            async for chunk in httpx_response.aiter_raw():
                response_body.write(chunk)
                bytes_received += len(chunk)
                if stop_download := check_stop_download(
                    signals.bytes_received, self.crawler, request, data=chunk
                ):
                    return make_response(
                        **make_response_base_args,
                        body=response_body.getvalue(),
                        stop_download=stop_download,
                    )
                if maxsize and bytes_received > maxsize:
                    # Free the buffered data before cancelling.
                    response_body.truncate(0)
                    self._cancel_maxsize(
                        bytes_received, maxsize, request, expected=False
                    )
                if warnsize and bytes_received > warnsize and not reached_warnsize:
                    reached_warnsize = True
                    logger.warning(
                        get_warnsize_msg(
                            bytes_received, warnsize, request, expected=False
                        )
                    )
        except httpx.RemoteProtocolError as e:
            # special handling of the dataloss case
            if (
                "peer closed connection without sending complete message body"
                not in str(e)
            ):
                raise
            fail_on_dataloss: bool = request.meta.get(
                "download_fail_on_dataloss", self._fail_on_dataloss
            )
            if not fail_on_dataloss:
                # Deliver the partial body, flagged as dataloss.
                return make_response(
                    **make_response_base_args,
                    body=response_body.getvalue(),
                    flags=["dataloss"],
                )
            self._log_dataloss_warning(request.url)
            raise ResponseDataLossError(str(e)) from e
        return make_response(
            **make_response_base_args,
            body=response_body.getvalue(),
        )

    @staticmethod
    def _get_server_ip(network_stream: AsyncNetworkStream) -> IPv4Address | IPv6Address:
        """Extract the remote server's IP address from the network stream."""
        extra_server_addr = network_stream.get_extra_info("server_addr")
        return ipaddress.ip_address(extra_server_addr[0])

    def _log_tls_info(self, network_stream: AsyncNetworkStream) -> None:
        """Log TLS debug details when verbose TLS logging is enabled."""
        if not self._tls_verbose_logging:
            return
        extra_ssl_object = network_stream.get_extra_info("ssl_object")
        if isinstance(extra_ssl_object, ssl.SSLObject):
            _log_sslobj_debug_info(extra_ssl_object)

    def _log_dataloss_warning(self, url: str) -> None:
        """Warn about a data-lossy response, at most once per handler."""
        if self._fail_on_dataloss_warned:
            return
        logger.warning(get_dataloss_msg(url))
        self._fail_on_dataloss_warned = True

    @staticmethod
    def _cancel_maxsize(
        size: int, limit: int, request: Request, *, expected: bool
    ) -> NoReturn:
        """Log and raise DownloadCancelledError for a too-large response."""
        warning_msg = get_maxsize_msg(size, limit, request, expected=expected)
        logger.warning(warning_msg)
        raise DownloadCancelledError(warning_msg)

    async def close(self) -> None:
        """Release the underlying httpx client and its connections."""
        await self._client.aclose()
await self._client.aclose() | --- +++ @@ -1,3 +1,4 @@+"""``httpx``-based HTTP(S) download handler. Currently not recommended for production use."""
from __future__ import annotations
@@ -57,6 +58,7 @@
# workaround for (and from) https://github.com/encode/httpx/issues/2992
class _NullCookieJar(CookieJar): # pragma: no cover
+ """A CookieJar that rejects all cookies."""
def extract_cookies(self, response: HTTPResponse, request: ULRequest) -> None:
pass
@@ -264,4 +266,4 @@ raise DownloadCancelledError(warning_msg)
async def close(self) -> None:
- await self._client.aclose()+ await self._client.aclose()
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/core/downloader/handlers/_httpx.py |
Create simple docstrings for beginners |
from __future__ import annotations
import inspect
import logging
import warnings
from typing import TYPE_CHECKING, Any, Protocol, cast
from scrapy import Request, Spider, signals
from scrapy.exceptions import NotConfigured, NotSupported, ScrapyDeprecationWarning
from scrapy.utils.defer import (
deferred_from_coro,
ensure_awaitable,
maybe_deferred_to_future,
)
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.misc import build_from_crawler, load_object
from scrapy.utils.python import global_object_name, without_none_values
if TYPE_CHECKING:
from collections.abc import Callable
from twisted.internet.defer import Deferred
from scrapy.crawler import Crawler
from scrapy.http import Response
logger = logging.getLogger(__name__)
# This is the official API but we temporarily support the old deprecated one:
# * lazy is not mandatory (defaults to True).
# * download_request() can return a Deferred[Response] instead of a coroutine,
# and takes a spider argument in this case.
# * close() can return None or Deferred[None] instead of a coroutine.
# * close() is not mandatory.
class DownloadHandlerProtocol(Protocol):
    """Interface that per-scheme download handlers must implement."""

    # Whether instantiation is deferred until the scheme's first request.
    lazy: bool

    async def download_request(self, request: Request) -> Response: ...
    async def close(self) -> None: ...
class DownloadHandlers:
    """Registry that lazily instantiates and dispatches to per-scheme
    download handlers configured via the DOWNLOAD_HANDLERS settings."""

    def __init__(self, crawler: Crawler):
        self._crawler: Crawler = crawler
        # stores acceptable schemes on instancing
        self._schemes: dict[str, str | Callable[..., Any]] = {}
        # stores instanced handlers for schemes
        self._handlers: dict[str, DownloadHandlerProtocol] = {}
        # remembers failed handlers
        self._notconfigured: dict[str, str] = {}
        # remembers handlers with Deferred-based download_request()
        self._old_style_handlers: set[str] = set()
        handlers: dict[str, str | Callable[..., Any]] = without_none_values(
            cast(
                "dict[str, str | Callable[..., Any]]",
                crawler.settings.getwithbase("DOWNLOAD_HANDLERS"),
            )
        )
        for scheme, clspath in handlers.items():
            self._schemes[scheme] = clspath
            # Only non-lazy handlers are instantiated right away.
            self._load_handler(scheme, skip_lazy=True)
        crawler.signals.connect(self._close, signals.engine_stopped)

    def _get_handler(self, scheme: str) -> DownloadHandlerProtocol | None:
        """Return the handler for *scheme*, loading it on first use, or
        None when the scheme has no usable handler."""
        if scheme in self._handlers:
            return self._handlers[scheme]
        if scheme in self._notconfigured:
            return None
        if scheme not in self._schemes:
            self._notconfigured[scheme] = "no handler available for that scheme"
            return None
        return self._load_handler(scheme)

    def _load_handler(
        self, scheme: str, skip_lazy: bool = False
    ) -> DownloadHandlerProtocol | None:
        """Instantiate the handler class registered for *scheme*.

        Arguments:
            scheme -- URL scheme to load the handler for
            skip_lazy -- when True, skip (return None for) handlers
                marked, or defaulting to, lazy

        Failures are remembered in ``self._notconfigured`` so they are
        reported instead of retried.
        """
        path = self._schemes[scheme]
        try:
            dhcls: type[DownloadHandlerProtocol] = load_object(path)
            if skip_lazy:
                if not hasattr(dhcls, "lazy"):
                    warnings.warn(
                        f"{global_object_name(dhcls)} doesn't define a 'lazy' attribute."
                        f" This is deprecated, please add 'lazy = True' (which is the current"
                        f" default value) to the class definition.",
                        category=ScrapyDeprecationWarning,
                        stacklevel=1,
                    )
                if getattr(dhcls, "lazy", True):
                    return None
            dh = build_from_crawler(
                dhcls,
                self._crawler,
            )
        except NotConfigured as ex:
            self._notconfigured[scheme] = str(ex)
            return None
        except Exception as ex:
            logger.error(
                'Loading "%(clspath)s" for scheme "%(scheme)s"',
                {"clspath": path, "scheme": scheme},
                exc_info=True,
                extra={"crawler": self._crawler},
            )
            self._notconfigured[scheme] = str(ex)
            return None
        self._handlers[scheme] = dh
        if not inspect.iscoroutinefunction(dh.download_request):  # pragma: no cover
            warnings.warn(
                f"{global_object_name(dh.download_request)} is not a coroutine function."
                f" This is deprecated, please rewrite it to return a coroutine and remove"
                f" the 'spider' argument.",
                category=ScrapyDeprecationWarning,
                stacklevel=1,
            )
            self._old_style_handlers.add(scheme)
        return dh

    def download_request(
        self, request: Request, spider: Spider | None = None
    ) -> Deferred[Response]:  # pragma: no cover
        """Deprecated Deferred-based wrapper for download_request_async()."""
        warnings.warn(
            "DownloadHandlers.download_request() is deprecated, use download_request_async() instead",
            category=ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.download_request_async(request))

    async def download_request_async(self, request: Request) -> Response:
        """Download *request* using the handler for its URL scheme.

        Raises:
            NotSupported: if the scheme has no usable handler.
        """
        scheme = urlparse_cached(request).scheme
        handler = self._get_handler(scheme)
        if not handler:
            raise NotSupported(
                f"Unsupported URL scheme '{scheme}': {self._notconfigured[scheme]}"
            )
        assert self._crawler.spider
        if scheme in self._old_style_handlers:  # pragma: no cover
            # Legacy handlers take a spider argument and return a Deferred.
            return await maybe_deferred_to_future(
                cast(
                    "Deferred[Response]",
                    handler.download_request(request, self._crawler.spider),  # type: ignore[call-arg]
                )
            )
        return await handler.download_request(request)

    async def _close(self) -> None:
        """Close every instantiated handler when the engine stops,
        tolerating (while deprecating) missing or non-coroutine close()."""
        for dh in self._handlers.values():
            if not hasattr(dh, "close"):  # pragma: no cover
                warnings.warn(
                    f"{global_object_name(dh)} doesn't define a close() method."
                    f" This is deprecated, please add an empty 'async def close()' method.",
                    category=ScrapyDeprecationWarning,
                    stacklevel=1,
                )
                continue
            if inspect.iscoroutinefunction(dh.close):
                await dh.close()
            else:  # pragma: no cover
                warnings.warn(
                    f"{global_object_name(dh.close)} is not a coroutine function."
                    f" This is deprecated, please rewrite it to return a coroutine.",
                    category=ScrapyDeprecationWarning,
                    stacklevel=1,
                )
                await ensure_awaitable(dh.close())
from __future__ import annotations
@@ -69,6 +70,9 @@ crawler.signals.connect(self._close, signals.engine_stopped)
def _get_handler(self, scheme: str) -> DownloadHandlerProtocol | None:
+ """Lazy-load the downloadhandler for a scheme
+ only on the first request for that scheme.
+ """
if scheme in self._handlers:
return self._handlers[scheme]
if scheme in self._notconfigured:
@@ -171,4 +175,4 @@ category=ScrapyDeprecationWarning,
stacklevel=1,
)
- await ensure_awaitable(dh.close())+ await ensure_awaitable(dh.close())
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/core/downloader/handlers/__init__.py |
Please document this code using docstrings |
from __future__ import annotations
import asyncio
import logging
import warnings
from time import time
from traceback import format_exc
from typing import TYPE_CHECKING, Any
from twisted.internet.defer import CancelledError, Deferred, inlineCallbacks
from twisted.python.failure import Failure
from scrapy import signals
from scrapy.core.scheduler import BaseScheduler
from scrapy.core.scraper import Scraper
from scrapy.exceptions import (
CloseSpider,
DontCloseSpider,
IgnoreRequest,
ScrapyDeprecationWarning,
)
from scrapy.http import Request, Response
from scrapy.utils.asyncio import (
AsyncioLoopingCall,
create_looping_call,
is_asyncio_available,
)
from scrapy.utils.defer import (
_schedule_coro,
deferred_from_coro,
ensure_awaitable,
maybe_deferred_to_future,
)
from scrapy.utils.deprecate import argument_is_required
from scrapy.utils.log import failure_to_exc_info, logformatter_adapter
from scrapy.utils.misc import build_from_crawler, load_object
from scrapy.utils.python import global_object_name
from scrapy.utils.reactor import CallLaterOnce
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Callable, Coroutine, Generator
from twisted.internet.task import LoopingCall
from scrapy.core.downloader import Downloader
from scrapy.crawler import Crawler
from scrapy.logformatter import LogFormatter
from scrapy.settings import BaseSettings, Settings
from scrapy.signalmanager import SignalManager
from scrapy.spiders import Spider
logger = logging.getLogger(__name__)
class _Slot:
    """Per-spider bookkeeping: tracks in-flight requests, owns the scheduler
    and the heartbeat that periodically re-triggers request processing."""
    def __init__(
        self,
        close_if_idle: bool,
        nextcall: CallLaterOnce[None],
        scheduler: BaseScheduler,
    ) -> None:
        self.close_if_idle: bool = close_if_idle
        self.nextcall: CallLaterOnce[None] = nextcall
        self.scheduler: BaseScheduler = scheduler
        # Fired once close() was requested and the last in-flight request finished.
        self.closing: Deferred[None] | None = None
        # Requests currently being downloaded/processed.
        self.inprogress: set[Request] = set()
        self.heartbeat: AsyncioLoopingCall | LoopingCall = create_looping_call(
            nextcall.schedule
        )
    def add_request(self, request: Request) -> None:
        """Mark *request* as in progress."""
        self.inprogress.add(request)
    def remove_request(self, request: Request) -> None:
        """Mark *request* as done; may complete a pending close()."""
        self.inprogress.remove(request)
        self._maybe_fire_closing()
    async def close(self) -> None:
        """Request the slot to close and wait until all in-flight requests finish."""
        self.closing = Deferred()
        self._maybe_fire_closing()
        await maybe_deferred_to_future(self.closing)
    def _maybe_fire_closing(self) -> None:
        # Only fire once a close was requested and nothing is in flight.
        if self.closing is None or self.inprogress:
            return
        if self.nextcall:
            self.nextcall.cancel()
        if self.heartbeat.running:
            self.heartbeat.stop()
        self.closing.callback(None)
class ExecutionEngine:
    """The Scrapy engine: coordinates the scheduler, the downloader and the
    scraper (spider callbacks + item pipelines) for one open spider.
    """
    # Interval (seconds) at which the slot heartbeat re-schedules request processing.
    _SLOT_HEARTBEAT_INTERVAL: float = 5.0
    def __init__(
        self,
        crawler: Crawler,
        spider_closed_callback: Callable[
            [Spider], Coroutine[Any, Any, None] | Deferred[None] | None
        ],
    ) -> None:
        self.crawler: Crawler = crawler
        self.settings: Settings = crawler.settings
        self.signals: SignalManager = crawler.signals
        assert crawler.logformatter
        self.logformatter: LogFormatter = crawler.logformatter
        self._slot: _Slot | None = None
        self.spider: Spider | None = None
        self.running: bool = False
        self._starting: bool = False
        self._stopping: bool = False
        self.paused: bool = False
        self._spider_closed_callback: Callable[
            [Spider], Coroutine[Any, Any, None] | Deferred[None] | None
        ] = spider_closed_callback
        self.start_time: float | None = None
        # Async iterator over Spider.start() output; None once exhausted.
        self._start: AsyncIterator[Any] | None = None
        self._closewait: Deferred[None] | None = None
        self._start_request_processing_awaitable: (
            asyncio.Future[None] | Deferred[None] | None
        ) = None
        downloader_cls: type[Downloader] = load_object(self.settings["DOWNLOADER"])
        try:
            self.scheduler_cls: type[BaseScheduler] = self._get_scheduler_class(
                crawler.settings
            )
            self.downloader: Downloader = downloader_cls(crawler)
            self._downloader_fetch_needs_spider: bool = argument_is_required(
                self.downloader.fetch, "spider"
            )
            if self._downloader_fetch_needs_spider:
                warnings.warn(
                    f"The fetch() method of {global_object_name(downloader_cls)} requires a spider argument,"
                    f" this is deprecated and the argument will not be passed in future Scrapy versions.",
                    ScrapyDeprecationWarning,
                    stacklevel=2,
                )
            self.scraper: Scraper = Scraper(crawler)
        except Exception:
            # Make sure a partially-built engine does not leak the downloader.
            if hasattr(self, "downloader"):
                self.downloader.close()
            raise
    def _get_scheduler_class(self, settings: BaseSettings) -> type[BaseScheduler]:
        """Load the SCHEDULER class and validate it implements BaseScheduler."""
        scheduler_cls: type[BaseScheduler] = load_object(settings["SCHEDULER"])
        if not issubclass(scheduler_cls, BaseScheduler):
            raise TypeError(
                f"The provided scheduler class ({settings['SCHEDULER']})"
                " does not fully implement the scheduler interface"
            )
        return scheduler_cls
    def start(
        self, _start_request_processing: bool = True
    ) -> Deferred[None]:  # pragma: no cover
        """Deprecated Deferred-based wrapper around :meth:`start_async`."""
        warnings.warn(
            "ExecutionEngine.start() is deprecated, use start_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(
            self.start_async(_start_request_processing=_start_request_processing)
        )
    async def start_async(self, *, _start_request_processing: bool = True) -> None:
        """Start the execution engine and block until it is stopped.

        .. versionadded:: 2.14
        """
        if self._starting:
            raise RuntimeError("Engine already running")
        self.start_time = time()
        self._starting = True
        await self.signals.send_catch_log_async(signal=signals.engine_started)
        if self._stopping:
            # band-aid until https://github.com/scrapy/scrapy/issues/6916
            return
        if _start_request_processing and self.spider is None:
            # require an opened spider when not run in scrapy shell
            return
        self.running = True
        self._closewait = Deferred()
        if _start_request_processing:
            coro = self._start_request_processing()
            if is_asyncio_available():
                # not wrapping in a Deferred here to avoid https://github.com/twisted/twisted/issues/12470
                # (can happen when this is cancelled, e.g. in test_close_during_start_iteration())
                self._start_request_processing_awaitable = asyncio.ensure_future(coro)
            else:
                self._start_request_processing_awaitable = Deferred.fromCoroutine(coro)
        await maybe_deferred_to_future(self._closewait)
    def stop(self) -> Deferred[None]:  # pragma: no cover
        """Deprecated Deferred-based wrapper around :meth:`stop_async`."""
        warnings.warn(
            "ExecutionEngine.stop() is deprecated, use stop_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.stop_async())
    async def stop_async(self) -> None:
        """Gracefully stop the execution engine.

        .. versionadded:: 2.14
        """
        if not self._starting:
            raise RuntimeError("Engine not running")
        self.running = self._starting = False
        self._stopping = True
        if self._start_request_processing_awaitable is not None:
            if (
                not is_asyncio_available()
                or self._start_request_processing_awaitable
                is not asyncio.current_task()
            ):
                # If using the asyncio loop and stop_async() was called from
                # start() itself, we can't cancel it, and _start_request_processing()
                # will exit via the self.running check.
                self._start_request_processing_awaitable.cancel()
            self._start_request_processing_awaitable = None
        if self.spider is not None:
            await self.close_spider_async(reason="shutdown")
        await self.signals.send_catch_log_async(signal=signals.engine_stopped)
        if self._closewait:
            self._closewait.callback(None)
    def close(self) -> Deferred[None]:  # pragma: no cover
        """Deprecated Deferred-based wrapper around :meth:`close_async`."""
        warnings.warn(
            "ExecutionEngine.close() is deprecated, use close_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.close_async())
    async def close_async(self) -> None:
        """Gracefully close the execution engine.

        If the engine has been started, stop it; otherwise close the spider
        and/or the downloader, whichever exists.
        """
        if self.running:
            await self.stop_async()  # will also close spider and downloader
        elif self.spider is not None:
            await self.close_spider_async(
                reason="shutdown"
            )  # will also close downloader
        elif hasattr(self, "downloader"):
            self.downloader.close()
    def pause(self) -> None:
        """Pause request processing (checked in _start_scheduled_requests())."""
        self.paused = True
    def unpause(self) -> None:
        """Resume request processing after :meth:`pause`."""
        self.paused = False
    async def _process_start_next(self) -> None:
        """Process the next item or request yielded by Spider.start().

        Requests are scheduled via crawl(); anything else is sent to the
        item-processing pipeline.
        """
        assert self._start is not None
        try:
            item_or_request = await self._start.__anext__()
        except StopAsyncIteration:
            self._start = None
        except Exception as exception:
            self._start = None
            exception_traceback = format_exc()
            logger.error(
                f"Error while reading start items and requests: {exception}.\n{exception_traceback}",
                exc_info=True,
            )
        else:
            if not self.spider:
                return # spider already closed
            if isinstance(item_or_request, Request):
                self.crawl(item_or_request)
            else:
                assert self._slot is not None
                _schedule_coro(
                    self.scraper.start_itemproc_async(item_or_request, response=None)
                )
            self._slot.nextcall.schedule()
    async def _start_request_processing(self) -> None:
        """Main loop: consume Spider.start() output and keep scheduled
        requests flowing until the engine stops or the spider closes."""
        # Starts the processing of scheduled requests, as well as a periodic
        # call to that processing method for scenarios where the scheduler
        # reports having pending requests but returns none.
        try:
            assert self._slot is not None  # typing
            self._slot.nextcall.schedule()
            self._slot.heartbeat.start(self._SLOT_HEARTBEAT_INTERVAL)
            while self._start and self.spider and self.running:
                await self._process_start_next()
                if not self.needs_backout():
                    # Give room for the outcome of self._process_start_next() to be
                    # processed before continuing with the next iteration.
                    self._slot.nextcall.schedule()
                    await self._slot.nextcall.wait()
        except (asyncio.exceptions.CancelledError, CancelledError):
            # self.stop_async() has cancelled us, nothing to do
            return
        except Exception:
            # an error happened, log it and stop the engine
            self._start_request_processing_awaitable = None
            logger.error(
                "Error while processing requests from start()",
                exc_info=True,
                extra={"spider": self.spider},
            )
            await self.stop_async()
    def _start_scheduled_requests(self) -> None:
        """Send scheduled requests to the downloader until backout is needed;
        trigger idle handling when applicable."""
        if self._slot is None or self._slot.closing is not None or self.paused:
            return
        while not self.needs_backout():
            if not self._start_scheduled_request():
                break
        if self.spider_is_idle() and self._slot.close_if_idle:
            self._spider_idle()
    def needs_backout(self) -> bool:
        """Return ``True`` if no more requests can be sent at the moment,
        ``False`` otherwise."""
        assert self.scraper.slot is not None  # typing
        return (
            not self.running
            or not self._slot
            or bool(self._slot.closing)
            or self.downloader.needs_backout()
            or self.scraper.slot.needs_backout()
        )
    def _start_scheduled_request(self) -> bool:
        """Pop one request from the scheduler and start downloading it.

        Returns ``False`` when the scheduler had no request to give.
        """
        assert self._slot is not None  # typing
        assert self.spider is not None  # typing
        request = self._slot.scheduler.next_request()
        if request is None:
            self.signals.send_catch_log(signals.scheduler_empty)
            return False
        d: Deferred[Response | Request] = self._download(request)
        d.addBoth(self._handle_downloader_output, request)
        d.addErrback(
            lambda f: logger.info(
                "Error while handling downloader output",
                exc_info=failure_to_exc_info(f),
                extra={"spider": self.spider},
            )
        )
        def _remove_request(_: Any) -> None:
            # Runs after download + scrape handling, regardless of outcome.
            assert self._slot
            self._slot.remove_request(request)
        d2: Deferred[None] = d.addBoth(_remove_request)
        d2.addErrback(
            lambda f: logger.info(
                "Error while removing request from slot",
                exc_info=failure_to_exc_info(f),
                extra={"spider": self.spider},
            )
        )
        slot = self._slot
        d2.addBoth(lambda _: slot.nextcall.schedule())
        d2.addErrback(
            lambda f: logger.info(
                "Error while scheduling new request",
                exc_info=failure_to_exc_info(f),
                extra={"spider": self.spider},
            )
        )
        return True
    @inlineCallbacks
    def _handle_downloader_output(
        self, result: Request | Response | Failure, request: Request
    ) -> Generator[Deferred[Any], Any, None]:
        """Route the downloader result: re-crawl Requests, scrape the rest."""
        if not isinstance(result, (Request, Response, Failure)):
            raise TypeError(
                f"Incorrect type: expected Request, Response or Failure, got {type(result)}: {result!r}"
            )
        # downloader middleware can return requests (for example, redirects)
        if isinstance(result, Request):
            self.crawl(result)
            return
        try:
            yield self.scraper.enqueue_scrape(result, request)
        except Exception:
            assert self.spider is not None
            logger.error(
                "Error while enqueuing scrape",
                exc_info=True,
                extra={"spider": self.spider},
            )
    def spider_is_idle(self) -> bool:
        """Return ``True`` when there is no pending or in-flight work left."""
        if self._slot is None:
            raise RuntimeError("Engine slot not assigned")
        if not self.scraper.slot.is_idle():  # type: ignore[union-attr]
            return False
        if self.downloader.active:  # downloader has pending requests
            return False
        if self._start is not None:  # not all start requests are handled
            return False
        return not self._slot.scheduler.has_pending_requests()
    def crawl(self, request: Request) -> None:
        """Inject the request into the spider <-> downloader pipeline."""
        if self.spider is None:
            raise RuntimeError(f"No open spider to crawl: {request}")
        self._schedule_request(request)
        self._slot.nextcall.schedule()  # type: ignore[union-attr]
    def _schedule_request(self, request: Request) -> None:
        """Enqueue *request* in the scheduler, honoring request_scheduled
        signal handlers that raise IgnoreRequest; emit request_dropped when
        the scheduler rejects it."""
        request_scheduled_result = self.signals.send_catch_log(
            signals.request_scheduled,
            request=request,
            spider=self.spider,
            dont_log=IgnoreRequest,
        )
        for _, result in request_scheduled_result:
            if isinstance(result, Failure) and isinstance(result.value, IgnoreRequest):
                return
        if not self._slot.scheduler.enqueue_request(request):  # type: ignore[union-attr]
            self.signals.send_catch_log(
                signals.request_dropped, request=request, spider=self.spider
            )
    def download(self, request: Request) -> Deferred[Response]:
        """Deprecated Deferred-based wrapper around :meth:`download_async`."""
        warnings.warn(
            "ExecutionEngine.download() is deprecated, use download_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.download_async(request))
    async def download_async(self, request: Request) -> Response:
        """Download *request* and return the Response; only downloader
        middlewares are applied. Follows downloader-returned Requests
        (e.g. redirects) recursively.

        .. versionadded:: 2.14
        """
        if self.spider is None:
            raise RuntimeError(f"No open spider to crawl: {request}")
        try:
            response_or_request = await maybe_deferred_to_future(
                self._download(request)
            )
        finally:
            assert self._slot is not None
            self._slot.remove_request(request)
        if isinstance(response_or_request, Request):
            return await self.download_async(response_or_request)
        return response_or_request
    @inlineCallbacks
    def _download(
        self, request: Request
    ) -> Generator[Deferred[Any], Any, Response | Request]:
        """Fetch *request* through the downloader, track it in the slot, and
        emit response_received for Responses."""
        assert self._slot is not None  # typing
        assert self.spider is not None
        self._slot.add_request(request)
        try:
            result: Response | Request
            if self._downloader_fetch_needs_spider:
                result = yield self.downloader.fetch(request, self.spider)
            else:
                result = yield self.downloader.fetch(request)
            if not isinstance(result, (Response, Request)):
                raise TypeError(
                    f"Incorrect type: expected Response or Request, got {type(result)}: {result!r}"
                )
            if isinstance(result, Response):
                if result.request is None:
                    result.request = request
                logkws = self.logformatter.crawled(result.request, result, self.spider)
                if logkws is not None:
                    logger.log(
                        *logformatter_adapter(logkws), extra={"spider": self.spider}
                    )
                self.signals.send_catch_log(
                    signal=signals.response_received,
                    response=result,
                    request=result.request,
                    spider=self.spider,
                )
            return result
        finally:
            self._slot.nextcall.schedule()
    def open_spider(
        self, spider: Spider, close_if_idle: bool = True
    ) -> Deferred[None]:  # pragma: no cover
        """Deprecated Deferred-based wrapper around :meth:`open_spider_async`."""
        warnings.warn(
            "ExecutionEngine.open_spider() is deprecated, use open_spider_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.open_spider_async(close_if_idle=close_if_idle))
    async def open_spider_async(self, *, close_if_idle: bool = True) -> None:
        """Open the crawler's spider: create the slot and scheduler, start
        consuming Spider.start(), open the scraper and stats, and send the
        spider_opened signal.

        .. versionadded:: 2.14
        """
        assert self.crawler.spider
        if self._slot is not None:
            raise RuntimeError(
                f"No free spider slot when opening {self.crawler.spider.name!r}"
            )
        logger.info("Spider opened", extra={"spider": self.crawler.spider})
        self.spider = self.crawler.spider
        nextcall = CallLaterOnce(self._start_scheduled_requests)
        scheduler = build_from_crawler(self.scheduler_cls, self.crawler)
        self._slot = _Slot(close_if_idle, nextcall, scheduler)
        self._start = await self.scraper.spidermw.process_start()
        if hasattr(scheduler, "open") and (d := scheduler.open(self.crawler.spider)):
            await maybe_deferred_to_future(d)
        await self.scraper.open_spider_async()
        assert self.crawler.stats
        if argument_is_required(self.crawler.stats.open_spider, "spider"):
            warnings.warn(
                f"The open_spider() method of {global_object_name(type(self.crawler.stats))} requires a spider argument,"
                f" this is deprecated and the argument will not be passed in future Scrapy versions.",
                ScrapyDeprecationWarning,
                stacklevel=2,
            )
            self.crawler.stats.open_spider(spider=self.crawler.spider)
        else:
            self.crawler.stats.open_spider()
        await self.signals.send_catch_log_async(
            signals.spider_opened, spider=self.crawler.spider
        )
    def _spider_idle(self) -> None:
        """Called when the spider goes idle.

        A spider_idle signal handler may raise DontCloseSpider to keep the
        spider open (this method will be called again later), or CloseSpider
        to provide a custom closing reason; otherwise the spider is closed
        with reason "finished".
        """
        assert self.spider is not None  # typing
        expected_ex = (DontCloseSpider, CloseSpider)
        res = self.signals.send_catch_log(
            signals.spider_idle, spider=self.spider, dont_log=expected_ex
        )
        detected_ex = {
            ex: x.value
            for _, x in res
            for ex in expected_ex
            if isinstance(x, Failure) and isinstance(x.value, ex)
        }
        if DontCloseSpider in detected_ex:
            return
        if self.spider_is_idle():
            ex = detected_ex.get(CloseSpider, CloseSpider(reason="finished"))
            assert isinstance(ex, CloseSpider)  # typing
            _schedule_coro(self.close_spider_async(reason=ex.reason))
    def close_spider(
        self, spider: Spider, reason: str = "cancelled"
    ) -> Deferred[None]:  # pragma: no cover
        """Deprecated Deferred-based wrapper around :meth:`close_spider_async`."""
        warnings.warn(
            "ExecutionEngine.close_spider() is deprecated, use close_spider_async() instead",
            ScrapyDeprecationWarning,
            stacklevel=2,
        )
        return deferred_from_coro(self.close_spider_async(reason=reason))
    async def close_spider_async(self, *, reason: str = "cancelled") -> None:  # noqa: PLR0912
        """Close (cancel) the spider and clear all its outstanding requests.

        Each shutdown step is attempted even if earlier ones fail; failures
        are logged, not raised.

        .. versionadded:: 2.14
        """
        if self.spider is None:
            raise RuntimeError("Spider not opened")
        if self._slot is None:
            raise RuntimeError("Engine slot not assigned")
        if self._slot.closing is not None:
            # A close is already in progress; just wait for it.
            await maybe_deferred_to_future(self._slot.closing)
            return
        spider = self.spider
        logger.info(
            "Closing spider (%(reason)s)", {"reason": reason}, extra={"spider": spider}
        )
        def log_failure(msg: str) -> None:
            logger.error(msg, exc_info=True, extra={"spider": spider})  # noqa: LOG014
        try:
            await self._slot.close()
        except Exception:
            log_failure("Slot close failure")
        try:
            self.downloader.close()
        except Exception:
            log_failure("Downloader close failure")
        try:
            await self.scraper.close_spider_async()
        except Exception:
            log_failure("Scraper close failure")
        if hasattr(self._slot.scheduler, "close"):
            try:
                if (d := self._slot.scheduler.close(reason)) is not None:
                    await maybe_deferred_to_future(d)
            except Exception:
                log_failure("Scheduler close failure")
        try:
            await self.signals.send_catch_log_async(
                signal=signals.spider_closed,
                spider=spider,
                reason=reason,
            )
        except Exception:
            log_failure("Error while sending spider_close signal")
        assert self.crawler.stats
        try:
            if argument_is_required(self.crawler.stats.close_spider, "spider"):
                warnings.warn(
                    f"The close_spider() method of {global_object_name(type(self.crawler.stats))} requires a spider argument,"
                    f" this is deprecated and the argument will not be passed in future Scrapy versions.",
                    ScrapyDeprecationWarning,
                    stacklevel=2,
                )
                self.crawler.stats.close_spider(
                    spider=self.crawler.spider, reason=reason
                )
            else:
                self.crawler.stats.close_spider(reason=reason)
        except Exception:
            log_failure("Stats close failure")
        logger.info(
            "Spider closed (%(reason)s)",
            {"reason": reason},
            extra={"spider": spider},
        )
        self._slot = None
        self.spider = None
        try:
            await ensure_awaitable(self._spider_closed_callback(spider))
        except Exception:
            log_failure("Error running spider_closed_callback")
+This is the Scrapy engine which controls the Scheduler, Downloader and Spider.
+
+For more information see docs/topics/architecture.rst
+
+"""
from __future__ import annotations
@@ -166,6 +172,10 @@ )
async def start_async(self, *, _start_request_processing: bool = True) -> None:
+ """Start the execution engine.
+
+ .. versionadded:: 2.14
+ """
if self._starting:
raise RuntimeError("Engine already running")
self.start_time = time()
@@ -198,6 +208,10 @@ return deferred_from_coro(self.stop_async())
async def stop_async(self) -> None:
+ """Gracefully stop the execution engine.
+
+ .. versionadded:: 2.14
+ """
if not self._starting:
raise RuntimeError("Engine not running")
@@ -230,6 +244,10 @@ return deferred_from_coro(self.close_async())
async def close_async(self) -> None:
+ """
+ Gracefully close the execution engine.
+ If it has already been started, stop it. In all cases, close the spider and the downloader.
+ """
if self.running:
await self.stop_async() # will also close spider and downloader
elif self.spider is not None:
@@ -246,6 +264,11 @@ self.paused = False
async def _process_start_next(self) -> None:
+ """Processes the next item or request from Spider.start().
+
+ If a request, it is scheduled. If an item, it is sent to item
+ pipelines.
+ """
assert self._start is not None
try:
item_or_request = await self._start.__anext__()
@@ -271,6 +294,8 @@ self._slot.nextcall.schedule()
async def _start_request_processing(self) -> None:
+ """Starts consuming Spider.start() output and sending scheduled
+ requests."""
# Starts the processing of scheduled requests, as well as a periodic
# call to that processing method for scenarios where the scheduler
# reports having pending requests but returns none.
@@ -311,6 +336,11 @@ self._spider_idle()
def needs_backout(self) -> bool:
+ """Returns ``True`` if no more requests can be sent at the moment, or
+ ``False`` otherwise.
+
+ See :ref:`start-requests-lazy` for an example.
+ """
assert self.scraper.slot is not None # typing
return (
not self.running
@@ -398,6 +428,7 @@ return not self._slot.scheduler.has_pending_requests()
def crawl(self, request: Request) -> None:
+ """Inject the request into the spider <-> downloader pipeline"""
if self.spider is None:
raise RuntimeError(f"No open spider to crawl: {request}")
self._schedule_request(request)
@@ -419,6 +450,7 @@ )
def download(self, request: Request) -> Deferred[Response]:
+ """Return a Deferred which fires with a Response as result, only downloader middlewares are applied"""
warnings.warn(
"ExecutionEngine.download() is deprecated, use download_async() instead",
ScrapyDeprecationWarning,
@@ -427,6 +459,12 @@ return deferred_from_coro(self.download_async(request))
async def download_async(self, request: Request) -> Response:
+ """Return a coroutine which fires with a Response as result.
+
+ Only downloader middlewares are applied.
+
+ .. versionadded:: 2.14
+ """
if self.spider is None:
raise RuntimeError(f"No open spider to crawl: {request}")
try:
@@ -517,6 +555,12 @@ )
def _spider_idle(self) -> None:
+ """
+ Called when a spider gets idle, i.e. when there are no remaining requests to download or schedule.
+ It can be called multiple times. If a handler for the spider_idle signal raises a DontCloseSpider
+ exception, the spider is not closed until the next loop and this function is guaranteed to be called
+ (at least) once again. A handler can raise CloseSpider to provide a custom closing reason.
+ """
assert self.spider is not None # typing
expected_ex = (DontCloseSpider, CloseSpider)
res = self.signals.send_catch_log(
@@ -546,6 +590,10 @@ return deferred_from_coro(self.close_spider_async(reason=reason))
async def close_spider_async(self, *, reason: str = "cancelled") -> None: # noqa: PLR0912
+ """Close (cancel) spider and clear all its outstanding requests.
+
+ .. versionadded:: 2.14
+ """
if self.spider is None:
raise RuntimeError("Spider not opened")
@@ -625,4 +673,4 @@ try:
await ensure_awaitable(self._spider_closed_callback(spider))
except Exception:
- log_failure("Error running spider_closed_callback")+ log_failure("Error running spider_closed_callback")
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/core/engine.py |
Add docstrings to improve code quality |
from __future__ import annotations
import ipaddress
import logging
import re
from contextlib import suppress
from io import BytesIO
from time import time
from typing import TYPE_CHECKING, Any, TypedDict, TypeVar, cast
from urllib.parse import urldefrag, urlparse
from twisted.internet import ssl
from twisted.internet.defer import Deferred, succeed
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.internet.protocol import Factory, Protocol, connectionDone
from twisted.python.failure import Failure
from twisted.web.client import (
URI,
Agent,
HTTPConnectionPool,
ResponseDone,
ResponseFailed,
)
from twisted.web.client import Response as TxResponse
from twisted.web.http import PotentialDataLoss, _DataLoss
from twisted.web.http_headers import Headers as TxHeaders
from twisted.web.iweb import UNKNOWN_LENGTH, IBodyProducer, IPolicyForHTTPS, IResponse
from zope.interface import implementer
from scrapy import Request, signals
from scrapy.core.downloader.contextfactory import load_context_factory_from_settings
from scrapy.exceptions import (
DownloadCancelledError,
DownloadTimeoutError,
ResponseDataLossError,
StopDownload,
)
from scrapy.http import Headers, Response
from scrapy.utils._download_handlers import (
BaseHttpDownloadHandler,
check_stop_download,
get_dataloss_msg,
get_maxsize_msg,
get_warnsize_msg,
make_response,
wrap_twisted_exceptions,
)
from scrapy.utils.defer import maybe_deferred_to_future
from scrapy.utils.deprecate import warn_on_deprecated_spider_attribute
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_bytes, to_unicode
from scrapy.utils.url import add_http_if_no_scheme
if TYPE_CHECKING:
from twisted.internet.base import ReactorBase
from twisted.internet.interfaces import IConsumer
# typing.NotRequired requires Python 3.11
from typing_extensions import NotRequired
from scrapy.crawler import Crawler
logger = logging.getLogger(__name__)
_T = TypeVar("_T")
class _ResultT(TypedDict):
    """Intermediate download state threaded through the ScrapyAgent
    deferred callback chain before the final Response is built."""
    # The raw Twisted response object.
    txresponse: TxResponse
    # Fully-read response body (absent until the body has been consumed).
    body: NotRequired[bytes]
    # Flags for the resulting Response — presumably e.g. on truncated bodies; confirm in body-reader code.
    flags: NotRequired[list[str] | None]
    # Server TLS certificate, when available.
    certificate: NotRequired[ssl.Certificate | None]
    # Remote peer IP address, when available.
    ip_address: NotRequired[ipaddress.IPv4Address | ipaddress.IPv6Address | None]
    # Set when a StopDownload signal handler interrupted the body download.
    stop_download: NotRequired[StopDownload | None]
class HTTP11DownloadHandler(BaseHttpDownloadHandler):
    """HTTP/1.0 and HTTP/1.1 download handler based on
    ``twisted.web.client.Agent``, sharing a persistent connection pool
    across requests."""
    def __init__(self, crawler: Crawler):
        super().__init__(crawler)
        self._crawler = crawler
        from twisted.internet import reactor
        # Persistent pool, capped per host by CONCURRENT_REQUESTS_PER_DOMAIN.
        self._pool: HTTPConnectionPool = HTTPConnectionPool(reactor, persistent=True)
        self._pool.maxPersistentPerHost = crawler.settings.getint(
            "CONCURRENT_REQUESTS_PER_DOMAIN"
        )
        self._pool._factory.noisy = False
        self._contextFactory: IPolicyForHTTPS = load_context_factory_from_settings(
            crawler.settings, crawler
        )
        # Seconds to wait for the pool to close connections before forcing it.
        self._disconnect_timeout: int = 1
    async def download_request(self, request: Request) -> Response:
        """Download *request* via a fresh ScrapyAgent and return the Response.

        On ResponseDataLossError (truncated body with fail-on-dataloss
        enabled), a warning is logged once per handler, then the error is
        re-raised.
        """
        if hasattr(self._crawler.spider, "download_maxsize"): # pragma: no cover
            warn_on_deprecated_spider_attribute("download_maxsize", "DOWNLOAD_MAXSIZE")
        if hasattr(self._crawler.spider, "download_warnsize"): # pragma: no cover
            warn_on_deprecated_spider_attribute(
                "download_warnsize", "DOWNLOAD_WARNSIZE"
            )
        agent = ScrapyAgent(
            contextFactory=self._contextFactory,
            pool=self._pool,
            maxsize=getattr(
                self._crawler.spider, "download_maxsize", self._default_maxsize
            ),
            warnsize=getattr(
                self._crawler.spider, "download_warnsize", self._default_warnsize
            ),
            fail_on_dataloss=self._fail_on_dataloss,
            crawler=self._crawler,
        )
        try:
            with wrap_twisted_exceptions():
                return await maybe_deferred_to_future(agent.download_request(request))
        except ResponseDataLossError:
            if not self._fail_on_dataloss_warned:
                logger.warning(get_dataloss_msg(request.url))
                self._fail_on_dataloss_warned = True
            raise
    async def close(self) -> None:
        """Close the cached pool connections, forcing completion after
        ``self._disconnect_timeout`` seconds if the pool hangs."""
        from twisted.internet import reactor
        d: Deferred[None] = self._pool.closeCachedConnections()
        # closeCachedConnections will hang on network or server issues, so
        # we'll manually timeout the deferred.
        #
        # Twisted issue addressing this problem can be found here:
        # https://github.com/twisted/twisted/issues/7738
        #
        # closeCachedConnections doesn't handle external errbacks, so we'll
        # issue a callback after `_disconnect_timeout` seconds.
        #
        # See also https://github.com/scrapy/scrapy/issues/2653
        delayed_call = reactor.callLater(self._disconnect_timeout, d.callback, [])
        try:
            await maybe_deferred_to_future(d)
        finally:
            if delayed_call.active():
                delayed_call.cancel()
class TunnelError(Exception):
    """An HTTP CONNECT tunnel could not be established by the proxy."""
class TunnelingTCP4ClientEndpoint(TCP4ClientEndpoint):
    """An endpoint that tunnels through a proxy to allow HTTPS downloads.

    It connects to the proxy, sends an HTTP CONNECT for the target host,
    and on a 200 reply starts TLS over the established tunnel.
    """
    # Max bytes of the proxy reply to include in error messages.
    _truncatedLength = 1000
    # Matches the proxy's HTTP status line, capturing status and reason.
    _responseAnswer = (
        r"HTTP/1\.. (?P<status>\d{3})(?P<reason>.{," + str(_truncatedLength) + r"})"
    )
    _responseMatcher = re.compile(_responseAnswer.encode())
    def __init__(
        self,
        reactor: ReactorBase,
        host: str,
        port: int,
        proxyConf: tuple[str, int, bytes | None],
        contextFactory: IPolicyForHTTPS,
        timeout: float = 30,
        bindAddress: tuple[str, int] | None = None,
    ):
        proxyHost, proxyPort, self._proxyAuthHeader = proxyConf
        # The TCP connection itself goes to the proxy, not the target host.
        super().__init__(reactor, proxyHost, proxyPort, timeout, bindAddress)
        self._tunnelReadyDeferred: Deferred[Protocol] = Deferred()
        self._tunneledHost: str = host
        self._tunneledPort: int = port
        self._contextFactory: IPolicyForHTTPS = contextFactory
        self._connectBuffer: bytearray = bytearray()
    def requestTunnel(self, protocol: Protocol) -> Protocol:
        """Ask the proxy to open a CONNECT tunnel to the target host.

        Temporarily redirects the protocol's dataReceived to
        processProxyResponse so the proxy reply is not fed to the client.
        """
        assert protocol.transport
        tunnelReq = tunnel_request_data(
            self._tunneledHost, self._tunneledPort, self._proxyAuthHeader
        )
        protocol.transport.write(tunnelReq)
        self._protocolDataReceived = protocol.dataReceived
        protocol.dataReceived = self.processProxyResponse # type: ignore[method-assign]
        self._protocol = protocol
        return protocol
    def processProxyResponse(self, data: bytes) -> None:
        """Process the proxy's reply to the CONNECT request.

        On a 200 status, starts TLS over the tunnel and fires
        _tunnelReadyDeferred with the protocol; otherwise errbacks it with
        a TunnelError.
        """
        assert self._protocol.transport
        self._connectBuffer += data
        # make sure that enough (all) bytes are consumed
        # and that we've got all HTTP headers (ending with a blank line)
        # from the proxy so that we don't send those bytes to the TLS layer
        #
        # see https://github.com/scrapy/scrapy/issues/2491
        if b"\r\n\r\n" not in self._connectBuffer:
            return
        # Restore the protocol's original dataReceived before going further.
        self._protocol.dataReceived = self._protocolDataReceived # type: ignore[method-assign]
        respm = TunnelingTCP4ClientEndpoint._responseMatcher.match(self._connectBuffer)
        if respm and int(respm.group("status")) == 200:
            # set proper Server Name Indication extension
            sslOptions = self._contextFactory.creatorForNetloc( # type: ignore[call-arg,misc]
                self._tunneledHost, self._tunneledPort
            )
            self._protocol.transport.startTLS(sslOptions, self._protocolFactory)
            self._tunnelReadyDeferred.callback(self._protocol)
        else:
            extra: Any
            if respm:
                extra = {
                    "status": int(respm.group("status")),
                    "reason": respm.group("reason").strip(),
                }
            else:
                extra = data[: self._truncatedLength]
            self._tunnelReadyDeferred.errback(
                TunnelError(
                    "Could not open CONNECT tunnel with proxy "
                    f"{self._host}:{self._port} [{extra!r}]"
                )
            )
    def connectFailed(self, reason: Failure) -> None:
        """Propagate a connection failure to the tunnel-ready deferred."""
        self._tunnelReadyDeferred.errback(reason)
    def connect(self, protocolFactory: Factory) -> Deferred[Protocol]:
        """Connect to the proxy and return a deferred that fires with the
        protocol once the tunnel is established (not on mere TCP connect)."""
        self._protocolFactory = protocolFactory
        connectDeferred = super().connect(protocolFactory)
        connectDeferred.addCallback(self.requestTunnel)
        connectDeferred.addErrback(self.connectFailed)
        return self._tunnelReadyDeferred
def tunnel_request_data(
    host: str, port: int, proxy_auth_header: bytes | None = None
) -> bytes:
    """Return the raw bytes of an HTTP CONNECT request for *host*:*port*.

    Includes a Proxy-Authorization header when *proxy_auth_header* is given,
    and ends with the blank line terminating the header section.
    """
    netloc = to_bytes(host, encoding="ascii") + b":" + to_bytes(str(port))
    request_lines = [
        b"CONNECT " + netloc + b" HTTP/1.1",
        b"Host: " + netloc,
    ]
    if proxy_auth_header:
        request_lines.append(b"Proxy-Authorization: " + proxy_auth_header)
    # Empty header line plus trailing CRLF terminate the request.
    request_lines.extend([b"", b""])
    return b"\r\n".join(request_lines)
class TunnelingAgent(Agent):
    """An Agent that uses a TunnelingTCP4ClientEndpoint to download over an
    HTTP CONNECT proxy tunnel.

    It subclasses Agent rather than ProxyAgent because, once the tunnel is
    open, the proxy is transparent to the client, so the agent behaves as
    if no proxy were involved.
    """
    def __init__(
        self,
        *,
        reactor: ReactorBase,
        proxyConf: tuple[str, int, bytes | None],
        contextFactory: IPolicyForHTTPS,
        connectTimeout: float | None = None,
        bindAddress: bytes | None = None,
        pool: HTTPConnectionPool | None = None,
    ):
        super().__init__(reactor, contextFactory, connectTimeout, bindAddress, pool)
        # (proxy_host, proxy_port, proxy_auth_header_or_None)
        self._proxyConf: tuple[str, int, bytes | None] = proxyConf
        self._contextFactory: IPolicyForHTTPS = contextFactory
    def _getEndpoint(self, uri: URI) -> TunnelingTCP4ClientEndpoint:
        # Override Agent's endpoint creation to tunnel through the proxy.
        return TunnelingTCP4ClientEndpoint(
            reactor=self._reactor,
            host=uri.host,
            port=uri.port,
            proxyConf=self._proxyConf,
            contextFactory=self._contextFactory,
            timeout=self._endpointFactory._connectTimeout,
            bindAddress=self._endpointFactory._bindAddress,
        )
    def _requestWithEndpoint(
        self,
        key: Any,
        endpoint: TCP4ClientEndpoint,
        method: bytes,
        parsedURI: URI,
        headers: TxHeaders | None,
        bodyProducer: IBodyProducer | None,
        requestPath: bytes,
    ) -> Deferred[IResponse]:
        # proxy host and port are required for HTTP pool `key`
        # otherwise, same remote host connection request could reuse
        # a cached tunneled connection to a different proxy
        key += self._proxyConf
        return super()._requestWithEndpoint(
            key=key,
            endpoint=endpoint,
            method=method,
            parsedURI=parsedURI,
            headers=headers,
            bodyProducer=bodyProducer,
            requestPath=requestPath,
        )
class ScrapyProxyAgent(Agent):
    """An Agent that sends every request through a single plain-HTTP proxy,
    passing the full target URL as the request path."""
    def __init__(
        self,
        reactor: ReactorBase,
        proxyURI: bytes,
        connectTimeout: float | None = None,
        bindAddress: bytes | None = None,
        pool: HTTPConnectionPool | None = None,
    ):
        super().__init__(
            reactor=reactor,
            connectTimeout=connectTimeout,
            bindAddress=bindAddress,
            pool=pool,
        )
        self._proxyURI: URI = URI.fromBytes(proxyURI)
    def request(
        self,
        method: bytes,
        uri: bytes,
        headers: TxHeaders | None = None,
        bodyProducer: IBodyProducer | None = None,
    ) -> Deferred[IResponse]:
        """Issue a new request via the configured proxy."""
        # Cache *all* connections under the same key, since we are only
        # connecting to a single destination, the proxy:
        return self._requestWithEndpoint(
            key=(b"http-proxy", self._proxyURI.host, self._proxyURI.port),
            endpoint=self._getEndpoint(self._proxyURI),
            method=method,
            parsedURI=URI.fromBytes(uri),
            headers=headers,
            bodyProducer=bodyProducer,
            requestPath=uri,
        )
class ScrapyAgent:
_Agent = Agent
_ProxyAgent = ScrapyProxyAgent
_TunnelingAgent = TunnelingAgent
    def __init__(
        self,
        *,
        contextFactory: IPolicyForHTTPS,
        connectTimeout: float = 10,
        bindAddress: bytes | None = None,
        pool: HTTPConnectionPool | None = None,
        maxsize: int = 0,
        warnsize: int = 0,
        fail_on_dataloss: bool = True,
        crawler: Crawler,
    ):
        """Configure a per-request agent wrapper.

        maxsize/warnsize cap the response body size (0 presumably means no
        limit — confirm against the body-reading code); fail_on_dataloss
        controls whether truncated responses raise instead of being flagged.
        """
        self._contextFactory: IPolicyForHTTPS = contextFactory
        self._connectTimeout: float = connectTimeout
        self._bindAddress: bytes | None = bindAddress
        self._pool: HTTPConnectionPool | None = pool
        self._maxsize: int = maxsize
        self._warnsize: int = warnsize
        self._fail_on_dataloss: bool = fail_on_dataloss
        # Kept for access to the in-flight twisted response (e.g. on timeout).
        self._txresponse: TxResponse | None = None
        self._crawler: Crawler = crawler
def _get_agent(self, request: Request, timeout: float) -> Agent:
from twisted.internet import reactor
bindaddress = request.meta.get("bindaddress") or self._bindAddress
proxy = request.meta.get("proxy")
if proxy:
proxy = add_http_if_no_scheme(proxy)
proxy_parsed = urlparse(proxy)
proxy_host = proxy_parsed.hostname
proxy_port = proxy_parsed.port
if not proxy_port:
proxy_port = 443 if proxy_parsed.scheme == "https" else 80
if urlparse_cached(request).scheme == "https":
assert proxy_host is not None
proxyAuth = request.headers.get(b"Proxy-Authorization", None)
proxyConf = (proxy_host, proxy_port, proxyAuth)
return self._TunnelingAgent(
reactor=reactor,
proxyConf=proxyConf,
contextFactory=self._contextFactory,
connectTimeout=timeout,
bindAddress=bindaddress,
pool=self._pool,
)
return self._ProxyAgent(
reactor=reactor,
proxyURI=to_bytes(proxy, encoding="ascii"),
connectTimeout=timeout,
bindAddress=bindaddress,
pool=self._pool,
)
return self._Agent(
reactor=reactor,
contextFactory=self._contextFactory,
connectTimeout=timeout,
bindAddress=bindaddress,
pool=self._pool,
)
def download_request(self, request: Request) -> Deferred[Response]:
from twisted.internet import reactor
timeout = request.meta.get("download_timeout") or self._connectTimeout
agent = self._get_agent(request, timeout)
# request details
url = urldefrag(request.url)[0]
method = to_bytes(request.method)
headers = TxHeaders(request.headers)
if isinstance(agent, self._TunnelingAgent):
headers.removeHeader(b"Proxy-Authorization")
bodyproducer = _RequestBodyProducer(request.body) if request.body else None
start_time = time()
d: Deferred[IResponse] = agent.request(
method,
to_bytes(url, encoding="ascii"),
headers,
cast("IBodyProducer", bodyproducer),
)
# set download latency
d.addCallback(self._cb_latency, request, start_time)
# response body is ready to be consumed
d2: Deferred[_ResultT] = d.addCallback(self._cb_bodyready, request)
d3: Deferred[Response] = d2.addCallback(self._cb_bodydone, url)
# check download timeout
self._timeout_cl = reactor.callLater(timeout, d3.cancel)
d3.addBoth(self._cb_timeout, request, url, timeout)
return d3
def _cb_timeout(self, result: _T, request: Request, url: str, timeout: float) -> _T:
if self._timeout_cl.active():
self._timeout_cl.cancel()
return result
# needed for HTTPS requests, otherwise _ResponseReader doesn't
# receive connectionLost()
if self._txresponse:
self._txresponse._transport.stopProducing()
raise DownloadTimeoutError(f"Getting {url} took longer than {timeout} seconds.")
def _cb_latency(self, result: _T, request: Request, start_time: float) -> _T:
request.meta["download_latency"] = time() - start_time
return result
@staticmethod
def _headers_from_twisted_response(response: TxResponse) -> Headers:
headers = Headers()
if response.length != UNKNOWN_LENGTH:
headers[b"Content-Length"] = str(response.length).encode()
headers.update(response.headers.getAllRawHeaders())
return headers
def _cb_bodyready(
self, txresponse: TxResponse, request: Request
) -> _ResultT | Deferred[_ResultT]:
if stop_download := check_stop_download(
signals.headers_received,
self._crawler,
request,
headers=self._headers_from_twisted_response(txresponse),
body_length=txresponse.length,
):
txresponse._transport.stopProducing()
txresponse._transport.loseConnection()
return {
"txresponse": txresponse,
"stop_download": stop_download,
}
# deliverBody hangs for responses without body
if cast("int", txresponse.length) == 0:
return {
"txresponse": txresponse,
}
maxsize = request.meta.get("download_maxsize", self._maxsize)
warnsize = request.meta.get("download_warnsize", self._warnsize)
expected_size = (
cast("int", txresponse.length)
if txresponse.length != UNKNOWN_LENGTH
else -1
)
fail_on_dataloss = request.meta.get(
"download_fail_on_dataloss", self._fail_on_dataloss
)
if maxsize and expected_size > maxsize:
warning_msg = get_maxsize_msg(
expected_size, maxsize, request, expected=True
)
logger.warning(warning_msg)
txresponse._transport.loseConnection()
raise DownloadCancelledError(warning_msg)
if warnsize and expected_size > warnsize:
logger.warning(
get_warnsize_msg(expected_size, warnsize, request, expected=True)
)
def _cancel(_: Any) -> None:
# Abort connection immediately.
txresponse._transport._producer.abortConnection()
d: Deferred[_ResultT] = Deferred(_cancel)
txresponse.deliverBody(
_ResponseReader(
finished=d,
txresponse=txresponse,
request=request,
maxsize=maxsize,
warnsize=warnsize,
fail_on_dataloss=fail_on_dataloss,
crawler=self._crawler,
)
)
# save response for timeouts
self._txresponse = txresponse
return d
def _cb_bodydone(self, result: _ResultT, url: str) -> Response:
headers = self._headers_from_twisted_response(result["txresponse"])
try:
version = result["txresponse"].version
protocol = f"{to_unicode(version[0])}/{version[1]}.{version[2]}"
except (AttributeError, TypeError, IndexError):
protocol = None
return make_response(
url=url,
status=int(result["txresponse"].code),
headers=headers,
body=result.get("body", b""),
flags=result.get("flags"),
certificate=result.get("certificate"),
ip_address=result.get("ip_address"),
protocol=protocol,
stop_download=result.get("stop_download"),
)
@implementer(IBodyProducer)
class _RequestBodyProducer:
def __init__(self, body: bytes):
self.body = body
self.length = len(body)
def startProducing(self, consumer: IConsumer) -> Deferred[None]:
consumer.write(self.body)
return succeed(None)
def pauseProducing(self) -> None:
pass
def stopProducing(self) -> None:
pass
class _ResponseReader(Protocol):
def __init__(
self,
finished: Deferred[_ResultT],
txresponse: TxResponse,
request: Request,
maxsize: int,
warnsize: int,
fail_on_dataloss: bool,
crawler: Crawler,
):
self._finished: Deferred[_ResultT] = finished
self._txresponse: TxResponse = txresponse
self._request: Request = request
self._bodybuf: BytesIO = BytesIO()
self._maxsize: int = maxsize
self._warnsize: int = warnsize
self._fail_on_dataloss: bool = fail_on_dataloss
self._reached_warnsize: bool = False
self._bytes_received: int = 0
self._certificate: ssl.Certificate | None = None
self._ip_address: ipaddress.IPv4Address | ipaddress.IPv6Address | None = None
self._crawler: Crawler = crawler
def _finish_response(
self, flags: list[str] | None = None, stop_download: StopDownload | None = None
) -> None:
self._finished.callback(
{
"txresponse": self._txresponse,
"body": self._bodybuf.getvalue(),
"flags": flags,
"certificate": self._certificate,
"ip_address": self._ip_address,
"stop_download": stop_download,
}
)
def connectionMade(self) -> None:
assert self.transport
if self._certificate is None:
with suppress(AttributeError):
self._certificate = ssl.Certificate(
self.transport._producer.getPeerCertificate()
)
if self._ip_address is None:
self._ip_address = ipaddress.ip_address(
self.transport._producer.getPeer().host
)
def dataReceived(self, bodyBytes: bytes) -> None:
# This maybe called several times after cancel was called with buffered data.
if self._finished.called:
return
assert self.transport
self._bodybuf.write(bodyBytes)
self._bytes_received += len(bodyBytes)
if stop_download := check_stop_download(
signals.bytes_received, self._crawler, self._request, data=bodyBytes
):
self.transport.stopProducing()
self.transport.loseConnection()
self._finish_response(stop_download=stop_download)
if self._maxsize and self._bytes_received > self._maxsize:
logger.warning(
get_maxsize_msg(
self._bytes_received, self._maxsize, self._request, expected=False
)
)
# Clear buffer earlier to avoid keeping data in memory for a long time.
self._bodybuf.truncate(0)
self._finished.cancel()
if (
self._warnsize
and self._bytes_received > self._warnsize
and not self._reached_warnsize
):
self._reached_warnsize = True
logger.warning(
get_warnsize_msg(
self._bytes_received, self._warnsize, self._request, expected=False
)
)
def connectionLost(self, reason: Failure = connectionDone) -> None:
if self._finished.called:
return
if reason.check(ResponseDone):
self._finish_response()
return
if reason.check(PotentialDataLoss):
self._finish_response(flags=["partial"])
return
if reason.check(ResponseFailed) and any(
r.check(_DataLoss)
for r in reason.value.reasons # type: ignore[union-attr]
):
if not self._fail_on_dataloss:
self._finish_response(flags=["dataloss"])
return
exc = ResponseDataLossError()
exc.__cause__ = reason.value
reason = Failure(exc)
self._finished.errback(reason) | --- +++ @@ -1,3 +1,4 @@+"""Download handlers for http and https schemes"""
from __future__ import annotations
@@ -95,6 +96,7 @@ self._disconnect_timeout: int = 1
async def download_request(self, request: Request) -> Response:
+ """Return a deferred for the HTTP download"""
if hasattr(self._crawler.spider, "download_maxsize"): # pragma: no cover
warn_on_deprecated_spider_attribute("download_maxsize", "DOWNLOAD_MAXSIZE")
if hasattr(self._crawler.spider, "download_warnsize"): # pragma: no cover
@@ -147,9 +149,17 @@
class TunnelError(Exception):
+ """An HTTP CONNECT tunnel could not be established by the proxy."""
class TunnelingTCP4ClientEndpoint(TCP4ClientEndpoint):
+ """An endpoint that tunnels through proxies to allow HTTPS downloads. To
+ accomplish that, this endpoint sends an HTTP CONNECT to the proxy.
+ The HTTP CONNECT is always sent when using this endpoint, I think this could
+ be improved as the CONNECT will be redundant if the connection associated
+ with this endpoint comes from the pool and a CONNECT has already been issued
+ for it.
+ """
_truncatedLength = 1000
_responseAnswer = (
@@ -176,6 +186,7 @@ self._connectBuffer: bytearray = bytearray()
def requestTunnel(self, protocol: Protocol) -> Protocol:
+ """Asks the proxy to open a tunnel."""
assert protocol.transport
tunnelReq = tunnel_request_data(
self._tunneledHost, self._tunneledPort, self._proxyAuthHeader
@@ -187,6 +198,10 @@ return protocol
def processProxyResponse(self, data: bytes) -> None:
+ """Processes the response from the proxy. If the tunnel is successfully
+ created, notifies the client that we are ready to send requests. If not
+ raises a TunnelError.
+ """
assert self._protocol.transport
self._connectBuffer += data
# make sure that enough (all) bytes are consumed
@@ -222,6 +237,7 @@ )
def connectFailed(self, reason: Failure) -> None:
+ """Propagates the errback to the appropriate deferred."""
self._tunnelReadyDeferred.errback(reason)
def connect(self, protocolFactory: Factory) -> Deferred[Protocol]:
@@ -235,6 +251,17 @@ def tunnel_request_data(
host: str, port: int, proxy_auth_header: bytes | None = None
) -> bytes:
+ r"""
+ Return binary content of a CONNECT request.
+
+ >>> from scrapy.utils.python import to_unicode as s
+ >>> s(tunnel_request_data("example.com", 8080))
+ 'CONNECT example.com:8080 HTTP/1.1\r\nHost: example.com:8080\r\n\r\n'
+ >>> s(tunnel_request_data("example.com", 8080, b"123"))
+ 'CONNECT example.com:8080 HTTP/1.1\r\nHost: example.com:8080\r\nProxy-Authorization: 123\r\n\r\n'
+ >>> s(tunnel_request_data(b"example.com", "8090"))
+ 'CONNECT example.com:8090 HTTP/1.1\r\nHost: example.com:8090\r\n\r\n'
+ """
host_value = to_bytes(host, encoding="ascii") + b":" + to_bytes(str(port))
tunnel_req = b"CONNECT " + host_value + b" HTTP/1.1\r\n"
tunnel_req += b"Host: " + host_value + b"\r\n"
@@ -245,6 +272,12 @@
class TunnelingAgent(Agent):
+ """An agent that uses a L{TunnelingTCP4ClientEndpoint} to make HTTPS
+ downloads. It may look strange that we have chosen to subclass Agent and not
+ ProxyAgent but consider that after the tunnel is opened the proxy is
+ transparent to the client; thus the agent should behave like there is no
+ proxy involved.
+ """
def __init__(
self,
@@ -320,6 +353,9 @@ headers: TxHeaders | None = None,
bodyProducer: IBodyProducer | None = None,
) -> Deferred[IResponse]:
+ """
+ Issue a new request via the configured proxy.
+ """
# Cache *all* connections under the same key, since we are only
# connecting to a single destination, the proxy:
return self._requestWithEndpoint(
@@ -672,4 +708,4 @@ exc.__cause__ = reason.value
reason = Failure(exc)
- self._finished.errback(reason)+ self._finished.errback(reason)
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/core/downloader/handlers/http11.py |
Write docstrings including parameters and return values | from __future__ import annotations
from collections import deque
from typing import TYPE_CHECKING
from twisted.internet import defer
from twisted.internet.defer import Deferred
from twisted.python.failure import Failure
from twisted.web.client import (
URI,
BrowserLikePolicyForHTTPS,
ResponseFailed,
_StandardEndpointFactory,
)
from twisted.web.error import SchemeNotSupported
from scrapy.core.downloader.contextfactory import AcceptableProtocolsContextFactory
from scrapy.core.http2.protocol import H2ClientFactory, H2ClientProtocol
if TYPE_CHECKING:
from twisted.internet.base import ReactorBase
from twisted.internet.endpoints import HostnameEndpoint
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.spiders import Spider
ConnectionKeyT = tuple[bytes, bytes, int]
class H2ConnectionPool:
def __init__(self, reactor: ReactorBase, settings: Settings) -> None:
self._reactor = reactor
self.settings = settings
# Store a dictionary which is used to get the respective
# H2ClientProtocolInstance using the key as Tuple(scheme, hostname, port)
self._connections: dict[ConnectionKeyT, H2ClientProtocol] = {}
# Save all requests that arrive before the connection is established
self._pending_requests: dict[
ConnectionKeyT, deque[Deferred[H2ClientProtocol]]
] = {}
def get_connection(
self, key: ConnectionKeyT, uri: URI, endpoint: HostnameEndpoint
) -> Deferred[H2ClientProtocol]:
if key in self._pending_requests:
# Received a request while connecting to remote
# Create a deferred which will fire with the H2ClientProtocol
# instance
d: Deferred[H2ClientProtocol] = Deferred()
self._pending_requests[key].append(d)
return d
# Check if we already have a connection to the remote
conn = self._connections.get(key, None)
if conn:
# Return this connection instance wrapped inside a deferred
return defer.succeed(conn)
# No connection is established for the given URI
return self._new_connection(key, uri, endpoint)
def _new_connection(
self, key: ConnectionKeyT, uri: URI, endpoint: HostnameEndpoint
) -> Deferred[H2ClientProtocol]:
self._pending_requests[key] = deque()
conn_lost_deferred: Deferred[list[BaseException]] = Deferred()
conn_lost_deferred.addCallback(self._remove_connection, key)
factory = H2ClientFactory(uri, self.settings, conn_lost_deferred)
conn_d = endpoint.connect(factory)
conn_d.addCallback(self.put_connection, key)
d: Deferred[H2ClientProtocol] = Deferred()
self._pending_requests[key].append(d)
return d
def put_connection(
self, conn: H2ClientProtocol, key: ConnectionKeyT
) -> H2ClientProtocol:
self._connections[key] = conn
# Now as we have established a proper HTTP/2 connection
# we fire all the deferred's with the connection instance
pending_requests = self._pending_requests.pop(key, None)
while pending_requests:
d = pending_requests.popleft()
d.callback(conn)
return conn
def _remove_connection(
self, errors: list[BaseException], key: ConnectionKeyT
) -> None:
self._connections.pop(key)
# Call the errback of all the pending requests for this connection
pending_requests = self._pending_requests.pop(key, None)
while pending_requests:
d = pending_requests.popleft()
d.errback(ResponseFailed(errors))
def close_connections(self) -> None:
for conn in self._connections.values():
assert conn.transport is not None # typing
conn.transport.abortConnection()
class H2Agent:
def __init__(
self,
reactor: ReactorBase,
pool: H2ConnectionPool,
context_factory: BrowserLikePolicyForHTTPS = BrowserLikePolicyForHTTPS(), # noqa: B008
connect_timeout: float | None = None,
bind_address: bytes | None = None,
) -> None:
self._reactor = reactor
self._pool = pool
self._context_factory = AcceptableProtocolsContextFactory(
context_factory, acceptable_protocols=[b"h2"]
)
self.endpoint_factory = _StandardEndpointFactory(
self._reactor, self._context_factory, connect_timeout, bind_address
)
def get_endpoint(self, uri: URI) -> HostnameEndpoint:
return self.endpoint_factory.endpointForURI(uri)
def get_key(self, uri: URI) -> ConnectionKeyT:
return uri.scheme, uri.host, uri.port
def request(self, request: Request, spider: Spider) -> Deferred[Response]:
uri = URI.fromBytes(bytes(request.url, encoding="utf-8"))
try:
endpoint = self.get_endpoint(uri)
except SchemeNotSupported:
return defer.fail(Failure())
key = self.get_key(uri)
d: Deferred[H2ClientProtocol] = self._pool.get_connection(key, uri, endpoint)
d2: Deferred[Response] = d.addCallback(
lambda conn: conn.request(request, spider)
)
return d2
class ScrapyProxyH2Agent(H2Agent):
def __init__(
self,
reactor: ReactorBase,
proxy_uri: URI,
pool: H2ConnectionPool,
context_factory: BrowserLikePolicyForHTTPS = BrowserLikePolicyForHTTPS(), # noqa: B008
connect_timeout: float | None = None,
bind_address: bytes | None = None,
) -> None:
super().__init__(
reactor=reactor,
pool=pool,
context_factory=context_factory,
connect_timeout=connect_timeout,
bind_address=bind_address,
)
self._proxy_uri = proxy_uri
def get_endpoint(self, uri: URI) -> HostnameEndpoint:
return self.endpoint_factory.endpointForURI(self._proxy_uri)
def get_key(self, uri: URI) -> ConnectionKeyT:
return b"http-proxy", self._proxy_uri.host, self._proxy_uri.port | --- +++ @@ -105,6 +105,11 @@ d.errback(ResponseFailed(errors))
def close_connections(self) -> None:
+ """Close all the HTTP/2 connections and remove them from pool
+
+ Returns:
+ Deferred that fires when all connections have been closed
+ """
for conn in self._connections.values():
assert conn.transport is not None # typing
conn.transport.abortConnection()
@@ -132,6 +137,10 @@ return self.endpoint_factory.endpointForURI(uri)
def get_key(self, uri: URI) -> ConnectionKeyT:
+ """
+ Arguments:
+ uri - URI obtained directly from request URL
+ """
return uri.scheme, uri.host, uri.port
def request(self, request: Request, spider: Spider) -> Deferred[Response]:
@@ -172,4 +181,5 @@ return self.endpoint_factory.endpointForURI(self._proxy_uri)
def get_key(self, uri: URI) -> ConnectionKeyT:
- return b"http-proxy", self._proxy_uri.host, self._proxy_uri.port+ """We use the proxy uri instead of uri obtained from request url"""
+ return b"http-proxy", self._proxy_uri.host, self._proxy_uri.port
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/core/http2/agent.py |
Generate consistent docstrings | from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any
from OpenSSL import SSL
from twisted.internet._sslverify import _setAcceptableProtocols
from twisted.internet.ssl import (
AcceptableCiphers,
CertificateOptions,
optionsForClientTLS,
platformTrust,
)
from twisted.web.client import BrowserLikePolicyForHTTPS
from twisted.web.iweb import IPolicyForHTTPS
from zope.interface.declarations import implementer
from zope.interface.verify import verifyObject
from scrapy.core.downloader.tls import (
DEFAULT_CIPHERS,
ScrapyClientTLSOptions,
openssl_methods,
)
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.deprecate import method_is_overridden
from scrapy.utils.misc import build_from_crawler, load_object
if TYPE_CHECKING:
from twisted.internet._sslverify import ClientTLSOptions
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.settings import BaseSettings
@implementer(IPolicyForHTTPS)
class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):
def __init__(
self,
method: int = SSL.SSLv23_METHOD, # noqa: S503
tls_verbose_logging: bool = False,
tls_ciphers: str | None = None,
*args: Any,
**kwargs: Any,
):
super().__init__(*args, **kwargs)
self._ssl_method: int = method
self.tls_verbose_logging: bool = tls_verbose_logging
self.tls_ciphers: AcceptableCiphers
if tls_ciphers:
self.tls_ciphers = AcceptableCiphers.fromOpenSSLCipherString(tls_ciphers)
else:
self.tls_ciphers = DEFAULT_CIPHERS
if method_is_overridden(type(self), ScrapyClientContextFactory, "getContext"):
warnings.warn(
"Overriding ScrapyClientContextFactory.getContext() is deprecated and that method"
" will be removed in a future Scrapy version. Override creatorForNetloc() instead.",
category=ScrapyDeprecationWarning,
stacklevel=2,
)
@classmethod
def from_crawler(
cls,
crawler: Crawler,
method: int = SSL.SSLv23_METHOD, # noqa: S503
*args: Any,
**kwargs: Any,
) -> Self:
tls_verbose_logging: bool = crawler.settings.getbool(
"DOWNLOADER_CLIENT_TLS_VERBOSE_LOGGING"
)
tls_ciphers: str | None = crawler.settings["DOWNLOADER_CLIENT_TLS_CIPHERS"]
return cls( # type: ignore[misc]
*args,
method=method,
tls_verbose_logging=tls_verbose_logging,
tls_ciphers=tls_ciphers,
**kwargs,
)
def getCertificateOptions(self) -> CertificateOptions:
# setting verify=True will require you to provide CAs
# to verify against; in other words: it's not that simple
return CertificateOptions(
verify=False,
method=self._ssl_method,
fixBrokenPeers=True,
acceptableCiphers=self.tls_ciphers,
)
# kept for old-style HTTP/1.0 downloader context twisted calls,
# e.g. connectSSL()
def getContext(self, hostname: Any = None, port: Any = None) -> SSL.Context:
ctx: SSL.Context = self.getCertificateOptions().getContext()
ctx.set_options(0x4) # OP_LEGACY_SERVER_CONNECT
return ctx
def creatorForNetloc(self, hostname: bytes, port: int) -> ClientTLSOptions:
return ScrapyClientTLSOptions(
hostname.decode("ascii"),
self.getContext(),
verbose_logging=self.tls_verbose_logging,
)
@implementer(IPolicyForHTTPS)
class BrowserLikeContextFactory(ScrapyClientContextFactory):
def creatorForNetloc(self, hostname: bytes, port: int) -> ClientTLSOptions:
# trustRoot set to platformTrust() will use the platform's root CAs.
#
# This means that a website like https://www.cacert.org will be rejected
# by default, since CAcert.org CA certificate is seldom shipped.
return optionsForClientTLS(
hostname=hostname.decode("ascii"),
trustRoot=platformTrust(),
extraCertificateOptions={"method": self._ssl_method},
)
@implementer(IPolicyForHTTPS)
class AcceptableProtocolsContextFactory:
def __init__(self, context_factory: Any, acceptable_protocols: list[bytes]):
verifyObject(IPolicyForHTTPS, context_factory)
self._wrapped_context_factory: Any = context_factory
self._acceptable_protocols: list[bytes] = acceptable_protocols
def creatorForNetloc(self, hostname: bytes, port: int) -> ClientTLSOptions:
options: ClientTLSOptions = self._wrapped_context_factory.creatorForNetloc(
hostname, port
)
_setAcceptableProtocols(options._ctx, self._acceptable_protocols)
return options
def load_context_factory_from_settings(
settings: BaseSettings, crawler: Crawler
) -> IPolicyForHTTPS:
ssl_method = openssl_methods[settings.get("DOWNLOADER_CLIENT_TLS_METHOD")]
context_factory_cls = load_object(settings["DOWNLOADER_CLIENTCONTEXTFACTORY"])
# try method-aware context factory
try:
context_factory = build_from_crawler(
context_factory_cls,
crawler,
method=ssl_method,
)
except TypeError:
# use context factory defaults
context_factory = build_from_crawler(
context_factory_cls,
crawler,
)
msg = (
f"{settings['DOWNLOADER_CLIENTCONTEXTFACTORY']} does not accept "
"a `method` argument (type OpenSSL.SSL method, e.g. "
"OpenSSL.SSL.SSLv23_METHOD) and/or a `tls_verbose_logging` "
"argument and/or a `tls_ciphers` argument. Please, upgrade your "
"context factory class to handle them or ignore them."
)
warnings.warn(msg)
return context_factory | --- +++ @@ -37,6 +37,15 @@
@implementer(IPolicyForHTTPS)
class ScrapyClientContextFactory(BrowserLikePolicyForHTTPS):
+ """
+ Non-peer-certificate verifying HTTPS context factory
+
+ Default OpenSSL method is TLS_METHOD (also called SSLv23_METHOD)
+ which allows TLS protocol negotiation
+
+ 'A TLS/SSL connection established with [this method] may
+ understand the TLSv1, TLSv1.1 and TLSv1.2 protocols.'
+ """
def __init__(
self,
@@ -109,6 +118,22 @@
@implementer(IPolicyForHTTPS)
class BrowserLikeContextFactory(ScrapyClientContextFactory):
+ """
+ Twisted-recommended context factory for web clients.
+
+ Quoting the documentation of the :class:`~twisted.web.client.Agent` class:
+
+ The default is to use a
+ :class:`~twisted.web.client.BrowserLikePolicyForHTTPS`, so unless you
+ have special requirements you can leave this as-is.
+
+ :meth:`creatorForNetloc` is the same as
+ :class:`~twisted.web.client.BrowserLikePolicyForHTTPS` except this context
+ factory allows setting the TLS/SSL method to use.
+
+ The default OpenSSL method is ``TLS_METHOD`` (also called
+ ``SSLv23_METHOD``) which allows TLS protocol negotiation.
+ """
def creatorForNetloc(self, hostname: bytes, port: int) -> ClientTLSOptions:
# trustRoot set to platformTrust() will use the platform's root CAs.
@@ -124,6 +149,10 @@
@implementer(IPolicyForHTTPS)
class AcceptableProtocolsContextFactory:
+ """Context factory to used to override the acceptable protocols
+ to set up the [OpenSSL.SSL.Context] for doing NPN and/or ALPN
+ negotiation.
+ """
def __init__(self, context_factory: Any, acceptable_protocols: list[bytes]):
verifyObject(IPolicyForHTTPS, context_factory)
@@ -165,4 +194,4 @@ )
warnings.warn(msg)
- return context_factory+ return context_factory
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/core/downloader/contextfactory.py |
Add docstrings to my Python code |
from __future__ import annotations
import logging
import warnings
from collections import deque
from collections.abc import AsyncIterator
from typing import TYPE_CHECKING, Any, TypeAlias, TypeVar
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.python.failure import Failure
from scrapy import Spider, signals
from scrapy.core.spidermw import SpiderMiddlewareManager
from scrapy.exceptions import (
CloseSpider,
DropItem,
IgnoreRequest,
ScrapyDeprecationWarning,
)
from scrapy.http import Request, Response
from scrapy.pipelines import ItemPipelineManager
from scrapy.utils.asyncio import _parallel_asyncio, is_asyncio_available
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.defer import (
_defer_sleep_async,
_schedule_coro,
aiter_errback,
deferred_from_coro,
ensure_awaitable,
iter_errback,
maybe_deferred_to_future,
parallel,
parallel_async,
)
from scrapy.utils.deprecate import method_is_overridden
from scrapy.utils.log import failure_to_exc_info, logformatter_adapter
from scrapy.utils.misc import load_object, warn_on_generator_with_return_value
from scrapy.utils.python import global_object_name
from scrapy.utils.spider import iterate_spider_output
if TYPE_CHECKING:
from collections.abc import Generator, Iterable
from scrapy.crawler import Crawler
from scrapy.logformatter import LogFormatter
from scrapy.signalmanager import SignalManager
logger = logging.getLogger(__name__)
_T = TypeVar("_T")
QueueTuple: TypeAlias = tuple[Response | Failure, Request, Deferred[None]]
class Slot:
MIN_RESPONSE_SIZE = 1024
def __init__(self, max_active_size: int = 5000000):
self.max_active_size: int = max_active_size
self.queue: deque[QueueTuple] = deque()
self.active: set[Request] = set()
self.active_size: int = 0
self.itemproc_size: int = 0
self.closing: Deferred[Spider] | None = None
def add_response_request(
self, result: Response | Failure, request: Request
) -> Deferred[None]:
# this Deferred will be awaited in enqueue_scrape()
deferred: Deferred[None] = Deferred()
self.queue.append((result, request, deferred))
if isinstance(result, Response):
self.active_size += max(len(result.body), self.MIN_RESPONSE_SIZE)
else:
self.active_size += self.MIN_RESPONSE_SIZE
return deferred
def next_response_request_deferred(self) -> QueueTuple:
result, request, deferred = self.queue.popleft()
self.active.add(request)
return result, request, deferred
def finish_response(self, result: Response | Failure, request: Request) -> None:
self.active.remove(request)
if isinstance(result, Response):
self.active_size -= max(len(result.body), self.MIN_RESPONSE_SIZE)
else:
self.active_size -= self.MIN_RESPONSE_SIZE
def is_idle(self) -> bool:
return not (self.queue or self.active)
def needs_backout(self) -> bool:
return self.active_size > self.max_active_size
class Scraper:
def __init__(self, crawler: Crawler) -> None:
self.slot: Slot | None = None
self.spidermw: SpiderMiddlewareManager = SpiderMiddlewareManager.from_crawler(
crawler
)
itemproc_cls: type[ItemPipelineManager] = load_object(
crawler.settings["ITEM_PROCESSOR"]
)
self.itemproc: ItemPipelineManager = itemproc_cls.from_crawler(crawler)
self._itemproc_has_async: dict[str, bool] = {}
for method in [
"open_spider",
"close_spider",
"process_item",
]:
self._check_deprecated_itemproc_method(method)
self.concurrent_items: int = crawler.settings.getint("CONCURRENT_ITEMS")
self.crawler: Crawler = crawler
self.signals: SignalManager = crawler.signals
assert crawler.logformatter
self.logformatter: LogFormatter = crawler.logformatter
def _check_deprecated_itemproc_method(self, method: str) -> None:
itemproc_cls = type(self.itemproc)
if not hasattr(self.itemproc, "process_item_async"):
warnings.warn(
f"{global_object_name(itemproc_cls)} doesn't define a {method}_async() method,"
f" this is deprecated and the method will be required in future Scrapy versions.",
ScrapyDeprecationWarning,
stacklevel=2,
)
self._itemproc_has_async[method] = False
elif (
issubclass(itemproc_cls, ItemPipelineManager)
and method_is_overridden(itemproc_cls, ItemPipelineManager, method)
and not method_is_overridden(
itemproc_cls, ItemPipelineManager, f"{method}_async"
)
):
warnings.warn(
f"{global_object_name(itemproc_cls)} overrides {method}() but doesn't override {method}_async()."
f" This is deprecated. {method}() will be used, but in future Scrapy versions {method}_async() will be used instead.",
ScrapyDeprecationWarning,
stacklevel=2,
)
self._itemproc_has_async[method] = False
else:
self._itemproc_has_async[method] = True
def open_spider(
self, spider: Spider | None = None
) -> Deferred[None]: # pragma: no cover
warnings.warn(
"Scraper.open_spider() is deprecated, use open_spider_async() instead",
ScrapyDeprecationWarning,
stacklevel=2,
)
return deferred_from_coro(self.open_spider_async())
async def open_spider_async(self) -> None:
self.slot = Slot(self.crawler.settings.getint("SCRAPER_SLOT_MAX_ACTIVE_SIZE"))
if not self.crawler.spider:
raise RuntimeError(
"Scraper.open_spider() called before Crawler.spider is set."
)
if self._itemproc_has_async["open_spider"]:
await self.itemproc.open_spider_async()
else:
await maybe_deferred_to_future(
self.itemproc.open_spider(self.crawler.spider)
)
def close_spider(
self, spider: Spider | None = None
) -> Deferred[None]: # pragma: no cover
warnings.warn(
"Scraper.close_spider() is deprecated, use close_spider_async() instead",
ScrapyDeprecationWarning,
stacklevel=2,
)
return deferred_from_coro(self.close_spider_async())
async def close_spider_async(self) -> None:
if self.slot is None:
raise RuntimeError("Scraper slot not assigned")
self.slot.closing = Deferred()
self._check_if_closing()
await maybe_deferred_to_future(self.slot.closing)
if self._itemproc_has_async["close_spider"]:
await self.itemproc.close_spider_async()
else:
assert self.crawler.spider
await maybe_deferred_to_future(
self.itemproc.close_spider(self.crawler.spider)
)
def is_idle(self) -> bool:
return not self.slot
def _check_if_closing(self) -> None:
assert self.slot is not None # typing
if self.slot.closing and self.slot.is_idle():
assert self.crawler.spider
self.slot.closing.callback(self.crawler.spider)
@inlineCallbacks
@_warn_spider_arg
def enqueue_scrape(
self, result: Response | Failure, request: Request, spider: Spider | None = None
) -> Generator[Deferred[Any], Any, None]:
if self.slot is None:
raise RuntimeError("Scraper slot not assigned")
dfd = self.slot.add_response_request(result, request)
self._scrape_next()
try:
yield dfd # fired in _wait_for_processing()
except Exception:
logger.error(
"Scraper bug processing %(request)s",
{"request": request},
exc_info=True,
extra={"spider": self.crawler.spider},
)
finally:
self.slot.finish_response(result, request)
self._check_if_closing()
self._scrape_next()
def _scrape_next(self) -> None:
assert self.slot is not None # typing
while self.slot.queue:
result, request, queue_dfd = self.slot.next_response_request_deferred()
_schedule_coro(self._wait_for_processing(result, request, queue_dfd))
async def _scrape(self, result: Response | Failure, request: Request) -> None:
if not isinstance(result, (Response, Failure)):
raise TypeError(
f"Incorrect type: expected Response or Failure, got {type(result)}: {result!r}"
)
output: Iterable[Any] | AsyncIterator[Any]
if isinstance(result, Response):
try:
# call the spider middlewares and the request callback with the response
output = await self.spidermw.scrape_response_async(
self.call_spider_async, result, request
)
except Exception:
self.handle_spider_error(Failure(), request, result)
else:
await self.handle_spider_output_async(output, request, result)
return
try:
# call the request errback with the downloader error
output = await self.call_spider_async(result, request)
except Exception as spider_exc:
# the errback didn't silence the exception
assert self.crawler.spider
if not result.check(IgnoreRequest):
logkws = self.logformatter.download_error(
result, request, self.crawler.spider
)
logger.log(
*logformatter_adapter(logkws),
extra={"spider": self.crawler.spider},
exc_info=failure_to_exc_info(result),
)
if spider_exc is not result.value:
# the errback raised a different exception, handle it
self.handle_spider_error(Failure(), request, result)
else:
await self.handle_spider_output_async(output, request, result)
async def _wait_for_processing(
self, result: Response | Failure, request: Request, queue_dfd: Deferred[None]
) -> None:
try:
await self._scrape(result, request)
except Exception:
queue_dfd.errback(Failure())
else:
queue_dfd.callback(None) # awaited in enqueue_scrape()
def call_spider(
self, result: Response | Failure, request: Request, spider: Spider | None = None
) -> Deferred[Iterable[Any] | AsyncIterator[Any]]: # pragma: no cover
warnings.warn(
"Scraper.call_spider() is deprecated, use call_spider_async() instead",
ScrapyDeprecationWarning,
stacklevel=2,
)
return deferred_from_coro(self.call_spider_async(result, request))
async def call_spider_async(
self, result: Response | Failure, request: Request
) -> Iterable[Any] | AsyncIterator[Any]:
await _defer_sleep_async()
assert self.crawler.spider
if isinstance(result, Response):
if getattr(result, "request", None) is None:
result.request = request
assert result.request
callback = result.request.callback or self.crawler.spider._parse
warn_on_generator_with_return_value(self.crawler.spider, callback)
output = callback(result, **result.request.cb_kwargs)
if isinstance(output, Deferred):
warnings.warn(
f"{callback} returned a Deferred."
f" Returning Deferreds from spider callbacks is deprecated.",
ScrapyDeprecationWarning,
stacklevel=2,
)
else: # result is a Failure
# TODO: properly type adding this attribute to a Failure
result.request = request # type: ignore[attr-defined]
if not request.errback:
result.raiseException()
warn_on_generator_with_return_value(self.crawler.spider, request.errback)
output = request.errback(result)
if isinstance(output, Failure):
output.raiseException()
# else the errback returned actual output (like a callback),
# which needs to be passed to iterate_spider_output()
if isinstance(output, Deferred):
warnings.warn(
f"{request.errback} returned a Deferred."
f" Returning Deferreds from spider errbacks is deprecated.",
ScrapyDeprecationWarning,
stacklevel=2,
)
return await ensure_awaitable(iterate_spider_output(output))
@_warn_spider_arg
def handle_spider_error(
self,
_failure: Failure,
request: Request,
response: Response | Failure,
spider: Spider | None = None,
) -> None:
assert self.crawler.spider
exc = _failure.value
if isinstance(exc, CloseSpider):
assert self.crawler.engine is not None # typing
_schedule_coro(
self.crawler.engine.close_spider_async(reason=exc.reason or "cancelled")
)
return
logkws = self.logformatter.spider_error(
_failure, request, response, self.crawler.spider
)
logger.log(
*logformatter_adapter(logkws),
exc_info=failure_to_exc_info(_failure),
extra={"spider": self.crawler.spider},
)
self.signals.send_catch_log(
signal=signals.spider_error,
failure=_failure,
response=response,
spider=self.crawler.spider,
)
assert self.crawler.stats
self.crawler.stats.inc_value("spider_exceptions/count")
self.crawler.stats.inc_value(
f"spider_exceptions/{_failure.value.__class__.__name__}"
)
def handle_spider_output(
self,
result: Iterable[_T] | AsyncIterator[_T],
request: Request,
response: Response | Failure,
spider: Spider | None = None,
) -> Deferred[None]: # pragma: no cover
warnings.warn(
"Scraper.handle_spider_output() is deprecated, use handle_spider_output_async() instead",
ScrapyDeprecationWarning,
stacklevel=2,
)
return deferred_from_coro(
self.handle_spider_output_async(result, request, response)
)
async def handle_spider_output_async(
self,
result: Iterable[_T] | AsyncIterator[_T],
request: Request,
response: Response | Failure,
) -> None:
it: Iterable[_T] | AsyncIterator[_T]
if is_asyncio_available():
if isinstance(result, AsyncIterator):
it = aiter_errback(result, self.handle_spider_error, request, response)
else:
it = iter_errback(result, self.handle_spider_error, request, response)
await _parallel_asyncio(
it, self.concurrent_items, self._process_spidermw_output_async, response
)
return
if isinstance(result, AsyncIterator):
it = aiter_errback(result, self.handle_spider_error, request, response)
await maybe_deferred_to_future(
parallel_async(
it,
self.concurrent_items,
self._process_spidermw_output,
response,
)
)
return
it = iter_errback(result, self.handle_spider_error, request, response)
await maybe_deferred_to_future(
parallel(
it,
self.concurrent_items,
self._process_spidermw_output,
response,
)
)
def _process_spidermw_output(
self, output: Any, response: Response | Failure
) -> Deferred[None]:
return deferred_from_coro(self._process_spidermw_output_async(output, response))
async def _process_spidermw_output_async(
self, output: Any, response: Response | Failure
) -> None:
if isinstance(output, Request):
assert self.crawler.engine is not None # typing
self.crawler.engine.crawl(request=output)
return
if output is not None:
await self.start_itemproc_async(output, response=response)
def start_itemproc(
self, item: Any, *, response: Response | Failure | None
) -> Deferred[None]: # pragma: no cover
warnings.warn(
"Scraper.start_itemproc() is deprecated, use start_itemproc_async() instead",
ScrapyDeprecationWarning,
stacklevel=2,
)
return deferred_from_coro(self.start_itemproc_async(item, response=response))
async def start_itemproc_async(
self, item: Any, *, response: Response | Failure | None
) -> None:
assert self.slot is not None # typing
assert self.crawler.spider is not None # typing
self.slot.itemproc_size += 1
try:
if self._itemproc_has_async["process_item"]:
output = await self.itemproc.process_item_async(item)
else:
output = await maybe_deferred_to_future(
self.itemproc.process_item(item, self.crawler.spider)
)
except DropItem as ex:
logkws = self.logformatter.dropped(item, ex, response, self.crawler.spider)
if logkws is not None:
logger.log(
*logformatter_adapter(logkws), extra={"spider": self.crawler.spider}
)
await self.signals.send_catch_log_async(
signal=signals.item_dropped,
item=item,
response=response,
spider=self.crawler.spider,
exception=ex,
)
except Exception as ex:
logkws = self.logformatter.item_error(
item, ex, response, self.crawler.spider
)
logger.log(
*logformatter_adapter(logkws),
extra={"spider": self.crawler.spider},
exc_info=True,
)
await self.signals.send_catch_log_async(
signal=signals.item_error,
item=item,
response=response,
spider=self.crawler.spider,
failure=Failure(),
)
else:
logkws = self.logformatter.scraped(output, response, self.crawler.spider)
if logkws is not None:
logger.log(
*logformatter_adapter(logkws), extra={"spider": self.crawler.spider}
)
await self.signals.send_catch_log_async(
signal=signals.item_scraped,
item=output,
response=response,
spider=self.crawler.spider,
)
finally:
self.slot.itemproc_size -= 1 | --- +++ @@ -1,3 +1,5 @@+"""This module implements the Scraper component which parses responses and
+extracts information from them"""
from __future__ import annotations
@@ -55,6 +57,7 @@
class Slot:
+ """Scraper slot (one per running spider)"""
MIN_RESPONSE_SIZE = 1024
@@ -159,6 +162,10 @@ return deferred_from_coro(self.open_spider_async())
async def open_spider_async(self) -> None:
+ """Open the spider for scraping and allocate resources for it.
+
+ .. versionadded:: 2.14
+ """
self.slot = Slot(self.crawler.settings.getint("SCRAPER_SLOT_MAX_ACTIVE_SIZE"))
if not self.crawler.spider:
raise RuntimeError(
@@ -182,6 +189,10 @@ return deferred_from_coro(self.close_spider_async())
async def close_spider_async(self) -> None:
+ """Close the spider being scraped and release its resources.
+
+ .. versionadded:: 2.14
+ """
if self.slot is None:
raise RuntimeError("Scraper slot not assigned")
self.slot.closing = Deferred()
@@ -196,6 +207,7 @@ )
def is_idle(self) -> bool:
+ """Return True if there isn't any more spiders to process"""
return not self.slot
def _check_if_closing(self) -> None:
@@ -234,6 +246,7 @@ _schedule_coro(self._wait_for_processing(result, request, queue_dfd))
async def _scrape(self, result: Response | Failure, request: Request) -> None:
+ """Handle the downloaded response or failure through the spider callback/errback."""
if not isinstance(result, (Response, Failure)):
raise TypeError(
f"Incorrect type: expected Response or Failure, got {type(result)}: {result!r}"
@@ -296,6 +309,10 @@ async def call_spider_async(
self, result: Response | Failure, request: Request
) -> Iterable[Any] | AsyncIterator[Any]:
+ """Call the request callback or errback with the response or failure.
+
+ .. versionadded:: 2.13
+ """
await _defer_sleep_async()
assert self.crawler.spider
if isinstance(result, Response):
@@ -340,6 +357,7 @@ response: Response | Failure,
spider: Spider | None = None,
) -> None:
+ """Handle an exception raised by a spider callback or errback."""
assert self.crawler.spider
exc = _failure.value
if isinstance(exc, CloseSpider):
@@ -375,6 +393,7 @@ response: Response | Failure,
spider: Spider | None = None,
) -> Deferred[None]: # pragma: no cover
+ """Pass items/requests produced by a callback to ``_process_spidermw_output()`` in parallel."""
warnings.warn(
"Scraper.handle_spider_output() is deprecated, use handle_spider_output_async() instead",
ScrapyDeprecationWarning,
@@ -390,6 +409,10 @@ request: Request,
response: Response | Failure,
) -> None:
+ """Pass items/requests produced by a callback to ``_process_spidermw_output()`` in parallel.
+
+ .. versionadded:: 2.13
+ """
it: Iterable[_T] | AsyncIterator[_T]
if is_asyncio_available():
if isinstance(result, AsyncIterator):
@@ -424,11 +447,21 @@ def _process_spidermw_output(
self, output: Any, response: Response | Failure
) -> Deferred[None]:
+ """Process each Request/Item (given in the output parameter) returned
+ from the given spider.
+
+ Items are sent to the item pipelines, requests are scheduled.
+ """
return deferred_from_coro(self._process_spidermw_output_async(output, response))
async def _process_spidermw_output_async(
self, output: Any, response: Response | Failure
) -> None:
+ """Process each Request/Item (given in the output parameter) returned
+ from the given spider.
+
+ Items are sent to the item pipelines, requests are scheduled.
+ """
if isinstance(output, Request):
assert self.crawler.engine is not None # typing
self.crawler.engine.crawl(request=output)
@@ -439,6 +472,11 @@ def start_itemproc(
self, item: Any, *, response: Response | Failure | None
) -> Deferred[None]: # pragma: no cover
+ """Send *item* to the item pipelines for processing.
+
+ *response* is the source of the item data. If the item does not come
+ from response data, e.g. it was hard-coded, set it to ``None``.
+ """
warnings.warn(
"Scraper.start_itemproc() is deprecated, use start_itemproc_async() instead",
ScrapyDeprecationWarning,
@@ -449,6 +487,13 @@ async def start_itemproc_async(
self, item: Any, *, response: Response | Failure | None
) -> None:
+ """Send *item* to the item pipelines for processing.
+
+ *response* is the source of the item data. If the item does not come
+ from response data, e.g. it was hard-coded, set it to ``None``.
+
+ .. versionadded:: 2.14
+ """
assert self.slot is not None # typing
assert self.crawler.spider is not None # typing
self.slot.itemproc_size += 1
@@ -501,4 +546,4 @@ spider=self.crawler.spider,
)
finally:
- self.slot.itemproc_size -= 1+ self.slot.itemproc_size -= 1
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/core/scraper.py |
Create documentation strings for testing functions | from __future__ import annotations
import asyncio
import contextlib
import logging
import pprint
import signal
import warnings
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, TypeVar
from twisted.internet.defer import Deferred, DeferredList, inlineCallbacks
from scrapy import Spider
from scrapy.addons import AddonManager
from scrapy.core.engine import ExecutionEngine
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.extension import ExtensionManager
from scrapy.settings import Settings, overridden_settings
from scrapy.signalmanager import SignalManager
from scrapy.spiderloader import SpiderLoaderProtocol, get_spider_loader
from scrapy.utils.defer import deferred_from_coro
from scrapy.utils.log import (
configure_logging,
get_scrapy_root_handler,
install_scrapy_root_handler,
log_reactor_info,
log_scrapy_info,
)
from scrapy.utils.misc import build_from_crawler, load_object
from scrapy.utils.ossignal import install_shutdown_handlers, signal_names
from scrapy.utils.reactor import (
_asyncio_reactor_path,
install_reactor,
is_asyncio_reactor_installed,
is_reactor_installed,
set_asyncio_event_loop,
verify_installed_asyncio_event_loop,
verify_installed_reactor,
)
from scrapy.utils.reactorless import install_reactor_import_hook
if TYPE_CHECKING:
from collections.abc import Awaitable, Generator, Iterable
from scrapy.logformatter import LogFormatter
from scrapy.statscollectors import StatsCollector
from scrapy.utils.request import RequestFingerprinterProtocol
logger = logging.getLogger(__name__)
_T = TypeVar("_T")
class Crawler:
def __init__(
self,
spidercls: type[Spider],
settings: dict[str, Any] | Settings | None = None,
init_reactor: bool = False,
):
if isinstance(spidercls, Spider):
raise ValueError("The spidercls argument must be a class, not an object")
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
self.spidercls: type[Spider] = spidercls
self.settings: Settings = settings.copy()
self.spidercls.update_settings(self.settings)
self._update_root_log_handler()
self.addons: AddonManager = AddonManager(self)
self.signals: SignalManager = SignalManager(self)
self._init_reactor: bool = init_reactor
self.crawling: bool = False
self._started: bool = False
self.extensions: ExtensionManager | None = None
self.stats: StatsCollector | None = None
self.logformatter: LogFormatter | None = None
self.request_fingerprinter: RequestFingerprinterProtocol | None = None
self.spider: Spider | None = None
self.engine: ExecutionEngine | None = None
def _update_root_log_handler(self) -> None:
if get_scrapy_root_handler() is not None:
# scrapy root handler already installed: update it with new settings
install_scrapy_root_handler(self.settings)
def _apply_settings(self) -> None:
if self.settings.frozen:
return
self.addons.load_settings(self.settings)
self.stats = load_object(self.settings["STATS_CLASS"])(self)
lf_cls: type[LogFormatter] = load_object(self.settings["LOG_FORMATTER"])
self.logformatter = lf_cls.from_crawler(self)
self.request_fingerprinter = build_from_crawler(
load_object(self.settings["REQUEST_FINGERPRINTER_CLASS"]),
self,
)
use_reactor = self.settings.getbool("TWISTED_ENABLED")
if use_reactor:
reactor_class: str = self.settings["TWISTED_REACTOR"]
event_loop: str = self.settings["ASYNCIO_EVENT_LOOP"]
if self._init_reactor:
# this needs to be done after the spider settings are merged,
# but before something imports twisted.internet.reactor
if reactor_class:
install_reactor(reactor_class, event_loop)
else:
from twisted.internet import reactor # noqa: F401
if reactor_class:
verify_installed_reactor(reactor_class)
if is_asyncio_reactor_installed() and event_loop:
verify_installed_asyncio_event_loop(event_loop)
if self._init_reactor or reactor_class:
log_reactor_info()
else:
logger.debug("Not using a Twisted reactor")
self._apply_reactorless_default_settings()
self.extensions = ExtensionManager.from_crawler(self)
self.settings.freeze()
d = dict(overridden_settings(self.settings))
logger.info(
"Overridden settings:\n%(settings)s", {"settings": pprint.pformat(d)}
)
def _apply_reactorless_default_settings(self) -> None:
self.settings.set("TELNETCONSOLE_ENABLED", False, priority="default")
# Cannot use @deferred_f_from_coro_f because that relies on the reactor
# being installed already, which is done within _apply_settings(), inside
# this method.
@inlineCallbacks
def crawl(self, *args: Any, **kwargs: Any) -> Generator[Deferred[Any], Any, None]:
if self.crawling:
raise RuntimeError("Crawling already taking place")
if self._started:
raise RuntimeError(
"Cannot run Crawler.crawl() more than once on the same instance."
)
self.crawling = self._started = True
try:
self.spider = self._create_spider(*args, **kwargs)
self._apply_settings()
self._update_root_log_handler()
self.engine = self._create_engine()
yield deferred_from_coro(self.engine.open_spider_async())
yield deferred_from_coro(self.engine.start_async())
except Exception:
self.crawling = False
if self.engine is not None:
yield deferred_from_coro(self.engine.close_async())
raise
async def crawl_async(self, *args: Any, **kwargs: Any) -> None:
if self.crawling:
raise RuntimeError("Crawling already taking place")
if self._started:
raise RuntimeError(
"Cannot run Crawler.crawl_async() more than once on the same instance."
)
self.crawling = self._started = True
try:
self.spider = self._create_spider(*args, **kwargs)
self._apply_settings()
self._update_root_log_handler()
self.engine = self._create_engine()
await self.engine.open_spider_async()
await self.engine.start_async()
except Exception:
self.crawling = False
if self.engine is not None:
await self.engine.close_async()
raise
def _create_spider(self, *args: Any, **kwargs: Any) -> Spider:
return self.spidercls.from_crawler(self, *args, **kwargs)
def _create_engine(self) -> ExecutionEngine:
return ExecutionEngine(self, lambda _: self.stop_async())
def stop(self) -> Deferred[None]:
warnings.warn(
"Crawler.stop() is deprecated, use stop_async() instead",
ScrapyDeprecationWarning,
stacklevel=2,
)
return deferred_from_coro(self.stop_async())
async def stop_async(self) -> None:
if self.crawling:
self.crawling = False
assert self.engine
if self.engine.running:
await self.engine.stop_async()
@staticmethod
def _get_component(
component_class: type[_T], components: Iterable[Any]
) -> _T | None:
for component in components:
if isinstance(component, component_class):
return component
return None
def get_addon(self, cls: type[_T]) -> _T | None:
return self._get_component(cls, self.addons.addons)
def get_downloader_middleware(self, cls: type[_T]) -> _T | None:
if not self.engine:
raise RuntimeError(
"Crawler.get_downloader_middleware() can only be called after "
"the crawl engine has been created."
)
return self._get_component(cls, self.engine.downloader.middleware.middlewares)
def get_extension(self, cls: type[_T]) -> _T | None:
if not self.extensions:
raise RuntimeError(
"Crawler.get_extension() can only be called after the "
"extension manager has been created."
)
return self._get_component(cls, self.extensions.middlewares)
def get_item_pipeline(self, cls: type[_T]) -> _T | None:
if not self.engine:
raise RuntimeError(
"Crawler.get_item_pipeline() can only be called after the "
"crawl engine has been created."
)
return self._get_component(cls, self.engine.scraper.itemproc.middlewares)
def get_spider_middleware(self, cls: type[_T]) -> _T | None:
if not self.engine:
raise RuntimeError(
"Crawler.get_spider_middleware() can only be called after the "
"crawl engine has been created."
)
return self._get_component(cls, self.engine.scraper.spidermw.middlewares)
class CrawlerRunnerBase(ABC):
def __init__(self, settings: dict[str, Any] | Settings | None = None):
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
AddonManager.load_pre_crawler_settings(settings)
self.settings: Settings = settings
self.spider_loader: SpiderLoaderProtocol = get_spider_loader(settings)
self._crawlers: set[Crawler] = set()
self.bootstrap_failed = False
@property
def crawlers(self) -> set[Crawler]:
return self._crawlers
def create_crawler(
self, crawler_or_spidercls: type[Spider] | str | Crawler
) -> Crawler:
if isinstance(crawler_or_spidercls, Spider):
raise ValueError(
"The crawler_or_spidercls argument cannot be a spider object, "
"it must be a spider class (or a Crawler object)"
)
if isinstance(crawler_or_spidercls, Crawler):
return crawler_or_spidercls
return self._create_crawler(crawler_or_spidercls)
def _create_crawler(self, spidercls: str | type[Spider]) -> Crawler:
if isinstance(spidercls, str):
spidercls = self.spider_loader.load(spidercls)
return Crawler(spidercls, self.settings)
@abstractmethod
def crawl(
self,
crawler_or_spidercls: type[Spider] | str | Crawler,
*args: Any,
**kwargs: Any,
) -> Awaitable[None]:
raise NotImplementedError
class CrawlerRunner(CrawlerRunnerBase):
def __init__(self, settings: dict[str, Any] | Settings | None = None):
super().__init__(settings)
if not self.settings.getbool("TWISTED_ENABLED"):
raise RuntimeError(
f"{type(self).__name__} doesn't support TWISTED_ENABLED=False."
)
self._active: set[Deferred[None]] = set()
def crawl(
self,
crawler_or_spidercls: type[Spider] | str | Crawler,
*args: Any,
**kwargs: Any,
) -> Deferred[None]:
if isinstance(crawler_or_spidercls, Spider):
raise ValueError(
"The crawler_or_spidercls argument cannot be a spider object, "
"it must be a spider class (or a Crawler object)"
)
crawler = self.create_crawler(crawler_or_spidercls)
return self._crawl(crawler, *args, **kwargs)
@inlineCallbacks
def _crawl(
self, crawler: Crawler, *args: Any, **kwargs: Any
) -> Generator[Deferred[Any], Any, None]:
self.crawlers.add(crawler)
d = crawler.crawl(*args, **kwargs)
self._active.add(d)
failed = False
try:
yield d
except Exception:
failed = True
raise
finally:
self.crawlers.discard(crawler)
self._active.discard(d)
self.bootstrap_failed |= not getattr(crawler, "spider", None) or failed
def stop(self) -> Deferred[Any]:
return DeferredList(deferred_from_coro(c.stop_async()) for c in self.crawlers)
@inlineCallbacks
def join(self) -> Generator[Deferred[Any], Any, None]:
while self._active:
yield DeferredList(self._active)
class AsyncCrawlerRunner(CrawlerRunnerBase):
def __init__(self, settings: dict[str, Any] | Settings | None = None):
super().__init__(settings)
self._active: set[asyncio.Task[None]] = set()
def crawl(
self,
crawler_or_spidercls: type[Spider] | str | Crawler,
*args: Any,
**kwargs: Any,
) -> asyncio.Task[None]:
if isinstance(crawler_or_spidercls, Spider):
raise ValueError(
"The crawler_or_spidercls argument cannot be a spider object, "
"it must be a spider class (or a Crawler object)"
)
if self.settings.getbool("TWISTED_ENABLED"):
if not is_asyncio_reactor_installed():
raise RuntimeError(
f"When TWISTED_ENABLED is True, {type(self).__name__} "
f"requires that the installed Twisted reactor is "
f'"twisted.internet.asyncioreactor.AsyncioSelectorReactor".'
)
elif is_reactor_installed():
raise RuntimeError(
"TWISTED_ENABLED is False but a Twisted reactor is installed."
)
crawler = self.create_crawler(crawler_or_spidercls)
return self._crawl(crawler, *args, **kwargs)
def _crawl(self, crawler: Crawler, *args: Any, **kwargs: Any) -> asyncio.Task[None]:
# At this point the asyncio loop has been installed either by the user
# or by AsyncCrawlerProcess (but it isn't running yet, so no asyncio.create_task()).
loop = asyncio.get_event_loop()
self.crawlers.add(crawler)
async def _crawl_and_track() -> None:
try:
await crawler.crawl_async(*args, **kwargs)
except Exception:
self.bootstrap_failed = True
raise # re-raise so asyncio still logs it to stderr naturally
task = loop.create_task(_crawl_and_track())
self._active.add(task)
def _done(_: asyncio.Task[None]) -> None:
self.crawlers.discard(crawler)
self._active.discard(task)
self.bootstrap_failed |= not getattr(crawler, "spider", None)
task.add_done_callback(_done)
return task
async def stop(self) -> None:
if self.crawlers:
await asyncio.wait(
[asyncio.create_task(c.stop_async()) for c in self.crawlers]
)
async def join(self) -> None:
while self._active:
await asyncio.wait(self._active)
class CrawlerProcessBase(CrawlerRunnerBase):
def __init__(
self,
settings: dict[str, Any] | Settings | None = None,
install_root_handler: bool = True,
):
super().__init__(settings)
configure_logging(self.settings, install_root_handler)
log_scrapy_info(self.settings)
@abstractmethod
def start(
self, stop_after_crawl: bool = True, install_signal_handlers: bool = True
) -> None:
raise NotImplementedError
def _signal_shutdown(self, signum: int, _: Any) -> None:
from twisted.internet import reactor
install_shutdown_handlers(self._signal_kill)
signame = signal_names[signum]
logger.info(
"Received %(signame)s, shutting down gracefully. Send again to force ",
{"signame": signame},
)
reactor.callFromThread(self._graceful_stop_reactor)
def _signal_kill(self, signum: int, _: Any) -> None:
from twisted.internet import reactor
install_shutdown_handlers(signal.SIG_IGN)
signame = signal_names[signum]
logger.info(
"Received %(signame)s twice, forcing unclean shutdown", {"signame": signame}
)
reactor.callFromThread(self._stop_reactor)
def _setup_reactor(self, install_signal_handlers: bool) -> None:
from twisted.internet import reactor
resolver_class = load_object(self.settings["DNS_RESOLVER"])
# We pass self, which is CrawlerProcess, instead of Crawler here,
# which works because the default resolvers only use crawler.settings.
resolver = build_from_crawler(resolver_class, self, reactor=reactor) # type: ignore[arg-type]
resolver.install_on_reactor()
tp = reactor.getThreadPool()
tp.adjustPoolsize(maxthreads=self.settings.getint("REACTOR_THREADPOOL_MAXSIZE"))
reactor.addSystemEventTrigger("before", "shutdown", self._stop_dfd)
if install_signal_handlers:
reactor.addSystemEventTrigger(
"after", "startup", install_shutdown_handlers, self._signal_shutdown
)
@abstractmethod
def _stop_dfd(self) -> Deferred[Any]:
raise NotImplementedError
@inlineCallbacks
def _graceful_stop_reactor(self) -> Generator[Deferred[Any], Any, None]:
try:
yield self._stop_dfd()
finally:
self._stop_reactor()
def _stop_reactor(self, _: Any = None) -> None:
from twisted.internet import reactor
# raised if already stopped or in shutdown stage
with contextlib.suppress(RuntimeError):
reactor.stop()
class CrawlerProcess(CrawlerProcessBase, CrawlerRunner):
def __init__(
self,
settings: dict[str, Any] | Settings | None = None,
install_root_handler: bool = True,
):
super().__init__(settings, install_root_handler)
self._initialized_reactor: bool = False
logger.debug("Using CrawlerProcess")
def _create_crawler(self, spidercls: type[Spider] | str) -> Crawler:
if isinstance(spidercls, str):
spidercls = self.spider_loader.load(spidercls)
init_reactor = not self._initialized_reactor
self._initialized_reactor = True
return Crawler(spidercls, self.settings, init_reactor=init_reactor)
def _stop_dfd(self) -> Deferred[Any]:
return self.stop()
def start(
self, stop_after_crawl: bool = True, install_signal_handlers: bool = True
) -> None:
from twisted.internet import reactor
if stop_after_crawl:
d = self.join()
# Don't start the reactor if the deferreds are already fired
if d.called:
return
d.addBoth(self._stop_reactor)
self._setup_reactor(install_signal_handlers)
reactor.run(installSignalHandlers=install_signal_handlers) # blocking call
class AsyncCrawlerProcess(CrawlerProcessBase, AsyncCrawlerRunner):
def __init__(
self,
settings: dict[str, Any] | Settings | None = None,
install_root_handler: bool = True,
):
super().__init__(settings, install_root_handler)
logger.debug("Using AsyncCrawlerProcess")
# We want the asyncio event loop to be installed early, so that it's
# always the correct one. And as we do that, we can also install the
# reactor here.
# The ASYNCIO_EVENT_LOOP setting cannot be overridden by add-ons and
# spiders when using AsyncCrawlerProcess.
loop_path = self.settings["ASYNCIO_EVENT_LOOP"]
if not self.settings.getbool("TWISTED_ENABLED"):
if is_reactor_installed():
raise RuntimeError(
"TWISTED_ENABLED is False but a Twisted reactor is installed."
)
set_asyncio_event_loop(loop_path)
install_reactor_import_hook()
elif is_reactor_installed():
# The user could install a reactor before this class is instantiated.
# We need to make sure the reactor is the correct one and the loop
# type matches the setting.
verify_installed_reactor(_asyncio_reactor_path)
if loop_path:
verify_installed_asyncio_event_loop(loop_path)
else:
install_reactor(_asyncio_reactor_path, loop_path)
self._initialized_reactor = True
def _stop_dfd(self) -> Deferred[Any]:
return deferred_from_coro(self.stop())
def start(
self, stop_after_crawl: bool = True, install_signal_handlers: bool = True
) -> None:
if not self.settings.getbool("TWISTED_ENABLED"):
self._start_asyncio(stop_after_crawl, install_signal_handlers)
else:
self._start_twisted(stop_after_crawl, install_signal_handlers)
def _start_asyncio(
self, stop_after_crawl: bool, install_signal_handlers: bool
) -> None:
# Very basic and will need multiple improvements.
# TODO https://docs.python.org/3/library/asyncio-runner.html#handling-keyboard-interruption
# TODO various exception handling
# TODO consider asyncio.run()
loop = asyncio.get_event_loop()
if stop_after_crawl:
join_task = loop.create_task(self.join())
join_task.add_done_callback(lambda _: loop.stop())
try:
loop.run_forever() # blocking call
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
def _start_twisted(
self, stop_after_crawl: bool, install_signal_handlers: bool
) -> None:
from twisted.internet import reactor
if stop_after_crawl:
loop = asyncio.get_event_loop()
join_task = loop.create_task(self.join())
join_task.add_done_callback(self._stop_reactor)
self._setup_reactor(install_signal_handlers)
reactor.run(installSignalHandlers=install_signal_handlers) # blocking call | --- +++ @@ -136,6 +136,12 @@ )
def _apply_reactorless_default_settings(self) -> None:
+ """Change some setting defaults when not using a Twisted reactor.
+
+ Some settings need different defaults when using and not using a
+ reactor, but as we can't put this logic into default_settings.py we
+ change them here when the reactor is not used.
+ """
self.settings.set("TELNETCONSOLE_ENABLED", False, priority="default")
# Cannot use @deferred_f_from_coro_f because that relies on the reactor
@@ -143,6 +149,12 @@ # this method.
@inlineCallbacks
def crawl(self, *args: Any, **kwargs: Any) -> Generator[Deferred[Any], Any, None]:
+ """Start the crawler by instantiating its spider class with the given
+ *args* and *kwargs* arguments, while setting the execution engine in
+ motion. Should be called only once.
+
+ Return a deferred that is fired when the crawl is finished.
+ """
if self.crawling:
raise RuntimeError("Crawling already taking place")
if self._started:
@@ -165,6 +177,14 @@ raise
async def crawl_async(self, *args: Any, **kwargs: Any) -> None:
+ """Start the crawler by instantiating its spider class with the given
+ *args* and *kwargs* arguments, while setting the execution engine in
+ motion. Should be called only once.
+
+ .. versionadded:: 2.14
+
+ Complete when the crawl is finished.
+ """
if self.crawling:
raise RuntimeError("Crawling already taking place")
if self._started:
@@ -193,6 +213,8 @@ return ExecutionEngine(self, lambda _: self.stop_async())
def stop(self) -> Deferred[None]:
+ """Start a graceful stop of the crawler and return a deferred that is
+ fired when the crawler is stopped."""
warnings.warn(
"Crawler.stop() is deprecated, use stop_async() instead",
ScrapyDeprecationWarning,
@@ -201,6 +223,10 @@ return deferred_from_coro(self.stop_async())
async def stop_async(self) -> None:
+ """Start a graceful stop of the crawler and complete when the crawler is stopped.
+
+ .. versionadded:: 2.14
+ """
if self.crawling:
self.crawling = False
assert self.engine
@@ -217,9 +243,23 @@ return None
def get_addon(self, cls: type[_T]) -> _T | None:
+ """Return the run-time instance of an :ref:`add-on <topics-addons>` of
+ the specified class or a subclass, or ``None`` if none is found.
+
+ .. versionadded:: 2.12
+ """
return self._get_component(cls, self.addons.addons)
def get_downloader_middleware(self, cls: type[_T]) -> _T | None:
+ """Return the run-time instance of a :ref:`downloader middleware
+ <topics-downloader-middleware>` of the specified class or a subclass,
+ or ``None`` if none is found.
+
+ .. versionadded:: 2.12
+
+ This method can only be called after the crawl engine has been created,
+ e.g. at signals :signal:`engine_started` or :signal:`spider_opened`.
+ """
if not self.engine:
raise RuntimeError(
"Crawler.get_downloader_middleware() can only be called after "
@@ -228,6 +268,16 @@ return self._get_component(cls, self.engine.downloader.middleware.middlewares)
def get_extension(self, cls: type[_T]) -> _T | None:
+ """Return the run-time instance of an :ref:`extension
+ <topics-extensions>` of the specified class or a subclass,
+ or ``None`` if none is found.
+
+ .. versionadded:: 2.12
+
+ This method can only be called after the extension manager has been
+ created, e.g. at signals :signal:`engine_started` or
+ :signal:`spider_opened`.
+ """
if not self.extensions:
raise RuntimeError(
"Crawler.get_extension() can only be called after the "
@@ -236,6 +286,15 @@ return self._get_component(cls, self.extensions.middlewares)
def get_item_pipeline(self, cls: type[_T]) -> _T | None:
+ """Return the run-time instance of a :ref:`item pipeline
+ <topics-item-pipeline>` of the specified class or a subclass, or
+ ``None`` if none is found.
+
+ .. versionadded:: 2.12
+
+ This method can only be called after the crawl engine has been created,
+ e.g. at signals :signal:`engine_started` or :signal:`spider_opened`.
+ """
if not self.engine:
raise RuntimeError(
"Crawler.get_item_pipeline() can only be called after the "
@@ -244,6 +303,15 @@ return self._get_component(cls, self.engine.scraper.itemproc.middlewares)
def get_spider_middleware(self, cls: type[_T]) -> _T | None:
+ """Return the run-time instance of a :ref:`spider middleware
+ <topics-spider-middleware>` of the specified class or a subclass, or
+ ``None`` if none is found.
+
+ .. versionadded:: 2.12
+
+ This method can only be called after the crawl engine has been created,
+ e.g. at signals :signal:`engine_started` or :signal:`spider_opened`.
+ """
if not self.engine:
raise RuntimeError(
"Crawler.get_spider_middleware() can only be called after the "
@@ -264,11 +332,23 @@
@property
def crawlers(self) -> set[Crawler]:
+ """Set of :class:`crawlers <scrapy.crawler.Crawler>` started by
+ :meth:`crawl` and managed by this class."""
return self._crawlers
def create_crawler(
self, crawler_or_spidercls: type[Spider] | str | Crawler
) -> Crawler:
+ """
+ Return a :class:`~scrapy.crawler.Crawler` object.
+
+ * If ``crawler_or_spidercls`` is a Crawler, it is returned as-is.
+ * If ``crawler_or_spidercls`` is a Spider subclass, a new Crawler
+ is constructed for it.
+ * If ``crawler_or_spidercls`` is a string, this function finds
+ a spider with this name in a Scrapy project (using spider loader),
+ then creates a Crawler instance for it.
+ """
if isinstance(crawler_or_spidercls, Spider):
raise ValueError(
"The crawler_or_spidercls argument cannot be a spider object, "
@@ -294,6 +374,20 @@
class CrawlerRunner(CrawlerRunnerBase):
+ """
+ This is a convenient helper class that keeps track of, manages and runs
+ crawlers inside an already setup :mod:`~twisted.internet.reactor`.
+
+ The CrawlerRunner object must be instantiated with a
+ :class:`~scrapy.settings.Settings` object.
+
+ This class shouldn't be needed (since Scrapy is responsible of using it
+ accordingly) unless writing scripts that manually handle the crawling
+ process. See :ref:`run-from-script` for an example.
+
+ This class provides Deferred-based APIs. Use :class:`AsyncCrawlerRunner`
+ for modern coroutine APIs.
+ """
def __init__(self, settings: dict[str, Any] | Settings | None = None):
super().__init__(settings)
@@ -309,6 +403,27 @@ *args: Any,
**kwargs: Any,
) -> Deferred[None]:
+ """
+ Run a crawler with the provided arguments.
+
+ It will call the given Crawler's :meth:`~Crawler.crawl` method, while
+ keeping track of it so it can be stopped later.
+
+ If ``crawler_or_spidercls`` isn't a :class:`~scrapy.crawler.Crawler`
+ instance, this method will try to create one using this parameter as
+ the spider class given to it.
+
+ Returns a deferred that is fired when the crawling is finished.
+
+ :param crawler_or_spidercls: already created crawler, or a spider class
+ or spider's name inside the project to create it
+ :type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
+ :class:`~scrapy.spiders.Spider` subclass or string
+
+ :param args: arguments to initialize the spider
+
+ :param kwargs: keyword arguments to initialize the spider
+ """
if isinstance(crawler_or_spidercls, Spider):
raise ValueError(
"The crawler_or_spidercls argument cannot be a spider object, "
@@ -336,15 +451,40 @@ self.bootstrap_failed |= not getattr(crawler, "spider", None) or failed
def stop(self) -> Deferred[Any]:
+ """
+ Stops simultaneously all the crawling jobs taking place.
+
+ Returns a deferred that is fired when they all have ended.
+ """
return DeferredList(deferred_from_coro(c.stop_async()) for c in self.crawlers)
@inlineCallbacks
def join(self) -> Generator[Deferred[Any], Any, None]:
+ """
+ join()
+
+ Returns a deferred that is fired when all managed :attr:`crawlers` have
+ completed their executions.
+ """
while self._active:
yield DeferredList(self._active)
class AsyncCrawlerRunner(CrawlerRunnerBase):
+ """
+ This is a convenient helper class that keeps track of, manages and runs
+ crawlers inside an already setup :mod:`~twisted.internet.reactor`.
+
+ The AsyncCrawlerRunner object must be instantiated with a
+ :class:`~scrapy.settings.Settings` object.
+
+ This class shouldn't be needed (since Scrapy is responsible of using it
+ accordingly) unless writing scripts that manually handle the crawling
+ process. See :ref:`run-from-script` for an example.
+
+ This class provides coroutine APIs. It requires
+ :class:`~twisted.internet.asyncioreactor.AsyncioSelectorReactor`.
+ """
def __init__(self, settings: dict[str, Any] | Settings | None = None):
super().__init__(settings)
@@ -356,6 +496,28 @@ *args: Any,
**kwargs: Any,
) -> asyncio.Task[None]:
+ """
+ Run a crawler with the provided arguments.
+
+ It will call the given Crawler's :meth:`~Crawler.crawl` method, while
+ keeping track of it so it can be stopped later.
+
+ If ``crawler_or_spidercls`` isn't a :class:`~scrapy.crawler.Crawler`
+ instance, this method will try to create one using this parameter as
+ the spider class given to it.
+
+ Returns a :class:`~asyncio.Task` object which completes when the
+ crawling is finished.
+
+ :param crawler_or_spidercls: already created crawler, or a spider class
+ or spider's name inside the project to create it
+ :type crawler_or_spidercls: :class:`~scrapy.crawler.Crawler` instance,
+ :class:`~scrapy.spiders.Spider` subclass or string
+
+ :param args: arguments to initialize the spider
+
+ :param kwargs: keyword arguments to initialize the spider
+ """
if isinstance(crawler_or_spidercls, Spider):
raise ValueError(
"The crawler_or_spidercls argument cannot be a spider object, "
@@ -400,12 +562,21 @@ return task
async def stop(self) -> None:
+ """
+ Stops simultaneously all the crawling jobs taking place.
+
+ Completes when they all have ended.
+ """
if self.crawlers:
await asyncio.wait(
[asyncio.create_task(c.stop_async()) for c in self.crawlers]
)
async def join(self) -> None:
+ """
+ Completes when all managed :attr:`crawlers` have completed their
+ executions.
+ """
while self._active:
await asyncio.wait(self._active)
@@ -483,6 +654,31 @@
class CrawlerProcess(CrawlerProcessBase, CrawlerRunner):
+ """
+ A class to run multiple scrapy crawlers in a process simultaneously.
+
+ This class extends :class:`~scrapy.crawler.CrawlerRunner` by adding support
+ for starting a :mod:`~twisted.internet.reactor` and handling shutdown
+ signals, like the keyboard interrupt command Ctrl-C. It also configures
+ top-level logging.
+
+ This utility should be a better fit than
+ :class:`~scrapy.crawler.CrawlerRunner` if you aren't running another
+ :mod:`~twisted.internet.reactor` within your application.
+
+ The CrawlerProcess object must be instantiated with a
+ :class:`~scrapy.settings.Settings` object.
+
+ :param install_root_handler: whether to install root logging handler
+ (default: True)
+
+ This class shouldn't be needed (since Scrapy is responsible of using it
+ accordingly) unless writing scripts that manually handle the crawling
+ process. See :ref:`run-from-script` for an example.
+
+ This class provides Deferred-based APIs. Use :class:`AsyncCrawlerProcess`
+ for modern coroutine APIs.
+ """
def __init__(
self,
@@ -506,6 +702,20 @@ def start(
self, stop_after_crawl: bool = True, install_signal_handlers: bool = True
) -> None:
+ """
+ This method starts a :mod:`~twisted.internet.reactor`, adjusts its pool
+ size to :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache
+ based on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.
+
+ If ``stop_after_crawl`` is True, the reactor will be stopped after all
+ crawlers have finished, using :meth:`join`.
+
+ :param bool stop_after_crawl: stop or not the reactor when all
+ crawlers have finished
+
+ :param bool install_signal_handlers: whether to install the OS signal
+ handlers from Twisted and Scrapy (default: True)
+ """
from twisted.internet import reactor
if stop_after_crawl:
@@ -520,6 +730,31 @@
class AsyncCrawlerProcess(CrawlerProcessBase, AsyncCrawlerRunner):
+ """
+ A class to run multiple scrapy crawlers in a process simultaneously.
+
+ This class extends :class:`~scrapy.crawler.AsyncCrawlerRunner` by adding support
+ for starting a :mod:`~twisted.internet.reactor` and handling shutdown
+ signals, like the keyboard interrupt command Ctrl-C. It also configures
+ top-level logging.
+
+ This utility should be a better fit than
+ :class:`~scrapy.crawler.AsyncCrawlerRunner` if you aren't running another
+ :mod:`~twisted.internet.reactor` within your application.
+
+ The AsyncCrawlerProcess object must be instantiated with a
+ :class:`~scrapy.settings.Settings` object.
+
+ :param install_root_handler: whether to install root logging handler
+ (default: True)
+
+ This class shouldn't be needed (since Scrapy is responsible of using it
+ accordingly) unless writing scripts that manually handle the crawling
+ process. See :ref:`run-from-script` for an example.
+
+ This class provides coroutine APIs. It requires
+ :class:`~twisted.internet.asyncioreactor.AsyncioSelectorReactor`.
+ """
def __init__(
self,
@@ -558,6 +793,20 @@ def start(
self, stop_after_crawl: bool = True, install_signal_handlers: bool = True
) -> None:
+ """
+ This method starts a :mod:`~twisted.internet.reactor`, adjusts its pool
+ size to :setting:`REACTOR_THREADPOOL_MAXSIZE`, and installs a DNS cache
+ based on :setting:`DNSCACHE_ENABLED` and :setting:`DNSCACHE_SIZE`.
+
+ If ``stop_after_crawl`` is True, the reactor will be stopped after all
+ crawlers have finished, using :meth:`join`.
+
+ :param bool stop_after_crawl: stop or not the reactor when all
+ crawlers have finished
+
+ :param bool install_signal_handlers: whether to install the OS signal
+ handlers from Twisted and Scrapy (default: True)
+ """
if not self.settings.getbool("TWISTED_ENABLED"):
self._start_asyncio(stop_after_crawl, install_signal_handlers)
@@ -593,4 +842,4 @@ join_task.add_done_callback(self._stop_reactor)
self._setup_reactor(install_signal_handlers)
- reactor.run(installSignalHandlers=install_signal_handlers) # blocking call+ reactor.run(installSignalHandlers=install_signal_handlers) # blocking call
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/crawler.py |
Add docstrings to improve readability | # Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Generator
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from gradio.components import Component
class Manager:
def __init__(self) -> None:
self._id_to_elem: dict[str, Component] = {}
self._elem_to_id: dict[Component, str] = {}
def add_elems(self, tab_name: str, elem_dict: dict[str, "Component"]) -> None:
for elem_name, elem in elem_dict.items():
elem_id = f"{tab_name}.{elem_name}"
self._id_to_elem[elem_id] = elem
self._elem_to_id[elem] = elem_id
def get_elem_list(self) -> list["Component"]:
return list(self._id_to_elem.values())
def get_elem_iter(self) -> Generator[tuple[str, "Component"], None, None]:
for elem_id, elem in self._id_to_elem.items():
yield elem_id.split(".")[-1], elem
def get_elem_by_id(self, elem_id: str) -> "Component":
return self._id_to_elem[elem_id]
def get_id_by_elem(self, elem: "Component") -> str:
return self._elem_to_id[elem]
def get_base_elems(self) -> set["Component"]:
return {
self._id_to_elem["top.lang"],
self._id_to_elem["top.model_name"],
self._id_to_elem["top.model_path"],
self._id_to_elem["top.finetuning_type"],
self._id_to_elem["top.checkpoint_path"],
self._id_to_elem["top.quantization_bit"],
self._id_to_elem["top.quantization_method"],
self._id_to_elem["top.template"],
self._id_to_elem["top.rope_scaling"],
self._id_to_elem["top.booster"],
} | --- +++ @@ -21,31 +21,41 @@
class Manager:
+ r"""A class to manage all the gradio components in Web UI."""
def __init__(self) -> None:
self._id_to_elem: dict[str, Component] = {}
self._elem_to_id: dict[Component, str] = {}
def add_elems(self, tab_name: str, elem_dict: dict[str, "Component"]) -> None:
+ r"""Add elements to manager."""
for elem_name, elem in elem_dict.items():
elem_id = f"{tab_name}.{elem_name}"
self._id_to_elem[elem_id] = elem
self._elem_to_id[elem] = elem_id
def get_elem_list(self) -> list["Component"]:
+ r"""Return the list of all elements."""
return list(self._id_to_elem.values())
def get_elem_iter(self) -> Generator[tuple[str, "Component"], None, None]:
+ r"""Return an iterator over all elements with their names."""
for elem_id, elem in self._id_to_elem.items():
yield elem_id.split(".")[-1], elem
def get_elem_by_id(self, elem_id: str) -> "Component":
+ r"""Get element by id.
+
+ Example: top.lang, train.dataset
+ """
return self._id_to_elem[elem_id]
def get_id_by_elem(self, elem: "Component") -> str:
+ r"""Get id by element."""
return self._elem_to_id[elem]
def get_base_elems(self) -> set["Component"]:
+ r"""Get the base elements that are commonly used."""
return {
self._id_to_elem["top.lang"],
self._id_to_elem["top.model_name"],
@@ -57,4 +67,4 @@ self._id_to_elem["top.template"],
self._id_to_elem["top.rope_scaling"],
self._id_to_elem["top.booster"],
- }+ }
| https://raw.githubusercontent.com/hiyouga/LlamaFactory/HEAD/src/llamafactory/webui/manager.py |
Document functions with detailed explanations | from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any
from itemadapter import ItemAdapter, is_item
from scrapy.contracts import Contract
from scrapy.exceptions import ContractFail
from scrapy.http import Request
if TYPE_CHECKING:
from collections.abc import Callable
# contracts
class UrlContract(Contract):
name = "url"
def adjust_request_args(self, args: dict[str, Any]) -> dict[str, Any]:
args["url"] = self.args[0]
return args
class CallbackKeywordArgumentsContract(Contract):
name = "cb_kwargs"
def adjust_request_args(self, args: dict[str, Any]) -> dict[str, Any]:
args["cb_kwargs"] = json.loads(" ".join(self.args))
return args
class MetadataContract(Contract):
name = "meta"
def adjust_request_args(self, args: dict[str, Any]) -> dict[str, Any]:
args["meta"] = json.loads(" ".join(self.args))
return args
class ReturnsContract(Contract):
name = "returns"
object_type_verifiers: dict[str | None, Callable[[Any], bool]] = {
"request": lambda x: isinstance(x, Request),
"requests": lambda x: isinstance(x, Request),
"item": is_item,
"items": is_item,
}
def __init__(self, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
if len(self.args) not in [1, 2, 3]:
raise ValueError(
f"Incorrect argument quantity: expected 1, 2 or 3, got {len(self.args)}"
)
self.obj_name = self.args[0] or None
self.obj_type_verifier = self.object_type_verifiers[self.obj_name]
try:
self.min_bound: float = int(self.args[1])
except IndexError:
self.min_bound = 1
try:
self.max_bound: float = int(self.args[2])
except IndexError:
self.max_bound = float("inf")
def post_process(self, output: list[Any]) -> None:
occurrences = 0
for x in output:
if self.obj_type_verifier(x):
occurrences += 1
assertion = self.min_bound <= occurrences <= self.max_bound
if not assertion:
if self.min_bound == self.max_bound:
expected = str(self.min_bound)
else:
expected = f"{self.min_bound}..{self.max_bound}"
raise ContractFail(
f"Returned {occurrences} {self.obj_name}, expected {expected}"
)
class ScrapesContract(Contract):
name = "scrapes"
def post_process(self, output: list[Any]) -> None:
for x in output:
if is_item(x):
missing = [arg for arg in self.args if arg not in ItemAdapter(x)]
if missing:
missing_fields = ", ".join(missing)
raise ContractFail(f"Missing fields: {missing_fields}") | --- +++ @@ -15,6 +15,9 @@
# contracts
class UrlContract(Contract):
+ """Contract to set the url of the request (mandatory)
+ @url http://scrapy.org
+ """
name = "url"
@@ -24,6 +27,11 @@
class CallbackKeywordArgumentsContract(Contract):
+ """Contract to set the keyword arguments for the request.
+ The value should be a JSON-encoded dictionary, e.g.:
+
+ @cb_kwargs {"arg1": "some value"}
+ """
name = "cb_kwargs"
@@ -33,6 +41,11 @@
class MetadataContract(Contract):
+ """Contract to set metadata arguments for the request.
+ The value should be JSON-encoded dictionary, e.g.:
+
+ @meta {"arg1": "some value"}
+ """
name = "meta"
@@ -42,6 +55,17 @@
class ReturnsContract(Contract):
+ """Contract to check the output of a callback
+
+ general form:
+ @returns request(s)/item(s) [min=1 [max]]
+
+ e.g.:
+ @returns request
+ @returns request 2
+ @returns request 2 10
+ @returns request 0 10
+ """
name = "returns"
object_type_verifiers: dict[str | None, Callable[[Any], bool]] = {
@@ -91,6 +115,9 @@
class ScrapesContract(Contract):
+ """Contract to check presence of fields in scraped items
+ @scrapes page_name page_body
+ """
name = "scrapes"
@@ -100,4 +127,4 @@ missing = [arg for arg in self.args if arg not in ItemAdapter(x)]
if missing:
missing_fields = ", ".join(missing)
- raise ContractFail(f"Missing fields: {missing_fields}")+ raise ContractFail(f"Missing fields: {missing_fields}")
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/contracts/default.py |
Create docstrings for all classes and functions |
from __future__ import annotations
import csv
import marshal
import pickle
import pprint
from abc import ABC, abstractmethod
from collections.abc import Callable, Iterable, Mapping
from io import BytesIO, TextIOWrapper
from typing import TYPE_CHECKING, Any
from xml.sax.saxutils import XMLGenerator
from xml.sax.xmlreader import AttributesImpl
from itemadapter import ItemAdapter, is_item
from scrapy.item import Field, Item
from scrapy.utils.python import is_listlike, to_bytes, to_unicode
from scrapy.utils.serialize import ScrapyJSONEncoder
if TYPE_CHECKING:
from json import JSONEncoder
__all__ = [
"BaseItemExporter",
"CsvItemExporter",
"JsonItemExporter",
"JsonLinesItemExporter",
"MarshalItemExporter",
"PickleItemExporter",
"PprintItemExporter",
"XmlItemExporter",
]
class BaseItemExporter(ABC):
def __init__(self, *, dont_fail: bool = False, **kwargs: Any):
self._kwargs: dict[str, Any] = kwargs
self._configure(kwargs, dont_fail=dont_fail)
def _configure(self, options: dict[str, Any], dont_fail: bool = False) -> None:
self.encoding: str | None = options.pop("encoding", None)
self.fields_to_export: Mapping[str, str] | Iterable[str] | None = options.pop(
"fields_to_export", None
)
self.export_empty_fields: bool = options.pop("export_empty_fields", False)
self.indent: int | None = options.pop("indent", None)
if not dont_fail and options:
raise TypeError(f"Unexpected options: {', '.join(options.keys())}")
@abstractmethod
def export_item(self, item: Any) -> None:
raise NotImplementedError
def serialize_field(
self, field: Mapping[str, Any] | Field, name: str, value: Any
) -> Any:
serializer: Callable[[Any], Any] = field.get("serializer", lambda x: x)
return serializer(value)
def start_exporting(self) -> None: # noqa: B027
pass
def finish_exporting(self) -> None: # noqa: B027
pass
def _get_serialized_fields(
self, item: Any, default_value: Any = None, include_empty: bool | None = None
) -> Iterable[tuple[str, Any]]:
item = ItemAdapter(item)
if include_empty is None:
include_empty = self.export_empty_fields
if self.fields_to_export is None:
field_iter = item.field_names() if include_empty else item.keys()
elif isinstance(self.fields_to_export, Mapping):
if include_empty:
field_iter = self.fields_to_export.items()
else:
field_iter = (
(x, y) for x, y in self.fields_to_export.items() if x in item
)
elif include_empty:
field_iter = self.fields_to_export
else:
field_iter = (x for x in self.fields_to_export if x in item)
for field_name in field_iter:
if isinstance(field_name, str):
item_field, output_field = field_name, field_name
else:
item_field, output_field = field_name
if item_field in item:
field_meta = item.get_field_meta(item_field)
value = self.serialize_field(field_meta, output_field, item[item_field])
else:
value = default_value
yield output_field, value
class JsonLinesItemExporter(BaseItemExporter):
def __init__(self, file: BytesIO, **kwargs: Any):
super().__init__(dont_fail=True, **kwargs)
self.file: BytesIO = file
self._kwargs.setdefault("ensure_ascii", not self.encoding)
self.encoder: JSONEncoder = ScrapyJSONEncoder(**self._kwargs)
def export_item(self, item: Any) -> None:
itemdict = dict(self._get_serialized_fields(item))
data = self.encoder.encode(itemdict) + "\n"
self.file.write(to_bytes(data, self.encoding))
class JsonItemExporter(BaseItemExporter):
def __init__(self, file: BytesIO, **kwargs: Any):
super().__init__(dont_fail=True, **kwargs)
self.file: BytesIO = file
# there is a small difference between the behaviour or JsonItemExporter.indent
# and ScrapyJSONEncoder.indent. ScrapyJSONEncoder.indent=None is needed to prevent
# the addition of newlines everywhere
json_indent = (
self.indent if self.indent is not None and self.indent > 0 else None
)
self._kwargs.setdefault("indent", json_indent)
self._kwargs.setdefault("ensure_ascii", not self.encoding)
self.encoder = ScrapyJSONEncoder(**self._kwargs)
self.first_item = True
def _beautify_newline(self) -> None:
if self.indent is not None:
self.file.write(b"\n")
def _add_comma_after_first(self) -> None:
if self.first_item:
self.first_item = False
else:
self.file.write(b",")
self._beautify_newline()
def start_exporting(self) -> None:
self.file.write(b"[")
self._beautify_newline()
def finish_exporting(self) -> None:
self._beautify_newline()
self.file.write(b"]")
def export_item(self, item: Any) -> None:
itemdict = dict(self._get_serialized_fields(item))
data = to_bytes(self.encoder.encode(itemdict), self.encoding)
self._add_comma_after_first()
self.file.write(data)
class XmlItemExporter(BaseItemExporter):
def __init__(self, file: BytesIO, **kwargs: Any):
self.item_element = kwargs.pop("item_element", "item")
self.root_element = kwargs.pop("root_element", "items")
super().__init__(**kwargs)
if not self.encoding:
self.encoding = "utf-8"
self.xg = XMLGenerator(file, encoding=self.encoding)
def _beautify_newline(self, new_item: bool = False) -> None:
if self.indent is not None and (self.indent > 0 or new_item):
self.xg.characters("\n")
def _beautify_indent(self, depth: int = 1) -> None:
if self.indent:
self.xg.characters(" " * self.indent * depth)
def start_exporting(self) -> None:
self.xg.startDocument()
self.xg.startElement(self.root_element, AttributesImpl({}))
self._beautify_newline(new_item=True)
def export_item(self, item: Any) -> None:
self._beautify_indent(depth=1)
self.xg.startElement(self.item_element, AttributesImpl({}))
self._beautify_newline()
for name, value in self._get_serialized_fields(item, default_value=""):
self._export_xml_field(name, value, depth=2)
self._beautify_indent(depth=1)
self.xg.endElement(self.item_element)
self._beautify_newline(new_item=True)
def finish_exporting(self) -> None:
self.xg.endElement(self.root_element)
self.xg.endDocument()
def _export_xml_field(self, name: str, serialized_value: Any, depth: int) -> None:
self._beautify_indent(depth=depth)
self.xg.startElement(name, AttributesImpl({}))
if hasattr(serialized_value, "items"):
self._beautify_newline()
for subname, value in serialized_value.items():
self._export_xml_field(subname, value, depth=depth + 1)
self._beautify_indent(depth=depth)
elif is_listlike(serialized_value):
self._beautify_newline()
for value in serialized_value:
self._export_xml_field("value", value, depth=depth + 1)
self._beautify_indent(depth=depth)
elif isinstance(serialized_value, str):
self.xg.characters(serialized_value)
else:
self.xg.characters(str(serialized_value))
self.xg.endElement(name)
self._beautify_newline()
class CsvItemExporter(BaseItemExporter):
def __init__(
self,
file: BytesIO,
include_headers_line: bool = True,
join_multivalued: str = ",",
errors: str | None = None,
**kwargs: Any,
):
super().__init__(dont_fail=True, **kwargs)
if not self.encoding:
self.encoding = "utf-8"
self.include_headers_line = include_headers_line
self.stream = TextIOWrapper(
file,
line_buffering=False,
write_through=True,
encoding=self.encoding,
newline="", # Windows needs this https://github.com/scrapy/scrapy/issues/3034
errors=errors,
)
self.csv_writer = csv.writer(self.stream, **self._kwargs)
self._headers_not_written = True
self._join_multivalued = join_multivalued
def serialize_field(
self, field: Mapping[str, Any] | Field, name: str, value: Any
) -> Any:
serializer: Callable[[Any], Any] = field.get("serializer", self._join_if_needed)
return serializer(value)
def _join_if_needed(self, value: Any) -> Any:
if isinstance(value, (list, tuple)):
try:
return self._join_multivalued.join(value)
except TypeError: # list in value may not contain strings
pass
return value
def export_item(self, item: Any) -> None:
if self._headers_not_written:
self._headers_not_written = False
self._write_headers_and_set_fields_to_export(item)
fields = self._get_serialized_fields(item, default_value="", include_empty=True)
values = list(self._build_row(x for _, x in fields))
self.csv_writer.writerow(values)
def finish_exporting(self) -> None:
self.stream.detach() # Avoid closing the wrapped file.
def _build_row(self, values: Iterable[Any]) -> Iterable[Any]:
for s in values:
try:
yield to_unicode(s, self.encoding)
except TypeError:
yield s
def _write_headers_and_set_fields_to_export(self, item: Any) -> None:
if self.include_headers_line:
if not self.fields_to_export:
# use declared field names, or keys if the item is a dict
self.fields_to_export = ItemAdapter(item).field_names()
fields: Iterable[str]
if isinstance(self.fields_to_export, Mapping):
fields = self.fields_to_export.values()
else:
assert self.fields_to_export
fields = self.fields_to_export
row = list(self._build_row(fields))
self.csv_writer.writerow(row)
class PickleItemExporter(BaseItemExporter):
def __init__(self, file: BytesIO, protocol: int = 4, **kwargs: Any):
super().__init__(**kwargs)
self.file: BytesIO = file
self.protocol: int = protocol
def export_item(self, item: Any) -> None:
d = dict(self._get_serialized_fields(item))
pickle.dump(d, self.file, self.protocol)
class MarshalItemExporter(BaseItemExporter):
def __init__(self, file: BytesIO, **kwargs: Any):
super().__init__(**kwargs)
self.file: BytesIO = file
def export_item(self, item: Any) -> None:
marshal.dump(dict(self._get_serialized_fields(item)), self.file)
class PprintItemExporter(BaseItemExporter):
def __init__(self, file: BytesIO, **kwargs: Any):
super().__init__(**kwargs)
self.file: BytesIO = file
def export_item(self, item: Any) -> None:
itemdict = dict(self._get_serialized_fields(item))
self.file.write(to_bytes(pprint.pformat(itemdict) + "\n"))
class PythonItemExporter(BaseItemExporter):
def _configure(self, options: dict[str, Any], dont_fail: bool = False) -> None:
super()._configure(options, dont_fail)
if not self.encoding:
self.encoding = "utf-8"
def serialize_field(
self, field: Mapping[str, Any] | Field, name: str, value: Any
) -> Any:
serializer: Callable[[Any], Any] = field.get(
"serializer", self._serialize_value
)
return serializer(value)
def _serialize_value(self, value: Any) -> Any:
if isinstance(value, Item):
return self.export_item(value)
if isinstance(value, (str, bytes)):
return to_unicode(value, encoding=self.encoding)
if is_item(value):
return dict(self._serialize_item(value))
if is_listlike(value):
return [self._serialize_value(v) for v in value]
return value
def _serialize_item(self, item: Any) -> Iterable[tuple[str | bytes, Any]]:
for key, value in ItemAdapter(item).items():
yield key, self._serialize_value(value)
def export_item(self, item: Any) -> dict[str | bytes, Any]: # type: ignore[override]
result: dict[str | bytes, Any] = dict(self._get_serialized_fields(item))
return result | --- +++ @@ -1,3 +1,6 @@+"""
+Item Exporters are used to export/serialize items into different formats.
+"""
from __future__ import annotations
@@ -39,6 +42,10 @@ self._configure(kwargs, dont_fail=dont_fail)
def _configure(self, options: dict[str, Any], dont_fail: bool = False) -> None:
+ """Configure the exporter by popping options from the ``options`` dict.
+ If dont_fail is set, it won't raise an exception on unexpected options
+ (useful for using with keyword arguments in subclasses ``__init__`` methods)
+ """
self.encoding: str | None = options.pop("encoding", None)
self.fields_to_export: Mapping[str, str] | Iterable[str] | None = options.pop(
"fields_to_export", None
@@ -67,6 +74,9 @@ def _get_serialized_fields(
self, item: Any, default_value: Any = None, include_empty: bool | None = None
) -> Iterable[tuple[str, Any]]:
+ """Return the fields to export as an iterable of tuples
+ (name, serialized_value)
+ """
item = ItemAdapter(item)
if include_empty is None:
@@ -296,6 +306,13 @@
class MarshalItemExporter(BaseItemExporter):
+ """Exports items in a Python-specific binary format (see
+ :mod:`marshal`).
+
+ :param file: The file-like object to use for exporting the data. Its
+ ``write`` method should accept :class:`bytes` (a disk file
+ opened in binary mode, a :class:`~io.BytesIO` object, etc)
+ """
def __init__(self, file: BytesIO, **kwargs: Any):
super().__init__(**kwargs)
@@ -316,6 +333,14 @@
class PythonItemExporter(BaseItemExporter):
+ """This is a base class for item exporters that extends
+ :class:`BaseItemExporter` with support for nested items.
+
+ It serializes items to built-in Python types, so that any serialization
+ library (e.g. :mod:`json` or msgpack_) can be used on top of it.
+
+ .. _msgpack: https://pypi.org/project/msgpack/
+ """
def _configure(self, options: dict[str, Any], dont_fail: bool = False) -> None:
super()._configure(options, dont_fail)
@@ -347,4 +372,4 @@
def export_item(self, item: Any) -> dict[str | bytes, Any]: # type: ignore[override]
result: dict[str | bytes, Any] = dict(self._get_serialized_fields(item))
- return result+ return result
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/exporters.py |
Add professional docstrings to my codebase | from __future__ import annotations
import json
import logging
from abc import abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING, Any
from warnings import warn
# working around https://github.com/sphinx-doc/sphinx/issues/10400
from twisted.internet.defer import Deferred # noqa: TC002
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.spiders import Spider # noqa: TC001
from scrapy.utils.job import job_dir
from scrapy.utils.misc import build_from_crawler, load_object
from scrapy.utils.python import global_object_name
if TYPE_CHECKING:
# requires queuelib >= 1.6.2
from queuelib.queue import BaseQueue
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.dupefilters import BaseDupeFilter
from scrapy.http.request import Request
from scrapy.pqueues import ScrapyPriorityQueue
from scrapy.statscollectors import StatsCollector
logger = logging.getLogger(__name__)
class BaseSchedulerMeta(type):
def __instancecheck__(cls, instance: Any) -> bool:
return cls.__subclasscheck__(type(instance))
def __subclasscheck__(cls, subclass: type) -> bool:
return (
hasattr(subclass, "has_pending_requests")
and callable(subclass.has_pending_requests)
and hasattr(subclass, "enqueue_request")
and callable(subclass.enqueue_request)
and hasattr(subclass, "next_request")
and callable(subclass.next_request)
)
class BaseScheduler(metaclass=BaseSchedulerMeta):
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
return cls()
def open(self, spider: Spider) -> Deferred[None] | None:
def close(self, reason: str) -> Deferred[None] | None:
@abstractmethod
def has_pending_requests(self) -> bool:
raise NotImplementedError
@abstractmethod
def enqueue_request(self, request: Request) -> bool:
raise NotImplementedError
@abstractmethod
def next_request(self) -> Request | None:
raise NotImplementedError
class Scheduler(BaseScheduler):
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
dupefilter_cls = load_object(crawler.settings["DUPEFILTER_CLASS"])
return cls(
dupefilter=build_from_crawler(dupefilter_cls, crawler),
jobdir=job_dir(crawler.settings),
dqclass=load_object(crawler.settings["SCHEDULER_DISK_QUEUE"]),
mqclass=load_object(crawler.settings["SCHEDULER_MEMORY_QUEUE"]),
logunser=crawler.settings.getbool("SCHEDULER_DEBUG"),
stats=crawler.stats,
pqclass=load_object(crawler.settings["SCHEDULER_PRIORITY_QUEUE"]),
crawler=crawler,
)
def __init__(
self,
dupefilter: BaseDupeFilter,
jobdir: str | None = None,
dqclass: type[BaseQueue] | None = None,
mqclass: type[BaseQueue] | None = None,
logunser: bool = False,
stats: StatsCollector | None = None,
pqclass: type[ScrapyPriorityQueue] | None = None,
crawler: Crawler | None = None,
):
self.df: BaseDupeFilter = dupefilter
self.dqdir: str | None = self._dqdir(jobdir)
self.pqclass: type[ScrapyPriorityQueue] | None = pqclass
self.dqclass: type[BaseQueue] | None = dqclass
self.mqclass: type[BaseQueue] | None = mqclass
self.logunser: bool = logunser
self.stats: StatsCollector | None = stats
self.crawler: Crawler | None = crawler
self._sdqclass: type[BaseQueue] | None = self._get_start_queue_cls(
crawler, "DISK"
)
self._smqclass: type[BaseQueue] | None = self._get_start_queue_cls(
crawler, "MEMORY"
)
def _get_start_queue_cls(
self, crawler: Crawler | None, queue: str
) -> type[BaseQueue] | None:
if crawler is None:
return None
cls = crawler.settings[f"SCHEDULER_START_{queue}_QUEUE"]
if not cls:
return None
return load_object(cls)
def has_pending_requests(self) -> bool:
return len(self) > 0
def open(self, spider: Spider) -> Deferred[None] | None:
self.spider: Spider = spider
self.mqs: ScrapyPriorityQueue = self._mq()
self.dqs: ScrapyPriorityQueue | None = self._dq() if self.dqdir else None
return self.df.open()
def close(self, reason: str) -> Deferred[None] | None:
if self.dqs is not None:
state = self.dqs.close()
assert isinstance(self.dqdir, str)
self._write_dqs_state(self.dqdir, state)
return self.df.close(reason)
def enqueue_request(self, request: Request) -> bool:
if not request.dont_filter and self.df.request_seen(request):
self.df.log(request, self.spider)
return False
dqok = self._dqpush(request)
assert self.stats is not None
if dqok:
self.stats.inc_value("scheduler/enqueued/disk")
else:
self._mqpush(request)
self.stats.inc_value("scheduler/enqueued/memory")
self.stats.inc_value("scheduler/enqueued")
return True
def next_request(self) -> Request | None:
request: Request | None = self.mqs.pop()
assert self.stats is not None
if request is not None:
self.stats.inc_value("scheduler/dequeued/memory")
else:
request = self._dqpop()
if request is not None:
self.stats.inc_value("scheduler/dequeued/disk")
if request is not None:
self.stats.inc_value("scheduler/dequeued")
return request
def __len__(self) -> int:
return len(self.dqs) + len(self.mqs) if self.dqs is not None else len(self.mqs)
def _dqpush(self, request: Request) -> bool:
if self.dqs is None:
return False
try:
self.dqs.push(request)
except ValueError as e: # non serializable request
if self.logunser:
msg = (
"Unable to serialize request: %(request)s - reason:"
" %(reason)s - no more unserializable requests will be"
" logged (stats being collected)"
)
logger.warning(
msg,
{"request": request, "reason": e},
exc_info=True,
extra={"spider": self.spider},
)
self.logunser = False
assert self.stats is not None
self.stats.inc_value("scheduler/unserializable")
return False
return True
def _mqpush(self, request: Request) -> None:
self.mqs.push(request)
def _dqpop(self) -> Request | None:
if self.dqs is not None:
return self.dqs.pop()
return None
def _mq(self) -> ScrapyPriorityQueue:
assert self.crawler
assert self.pqclass
try:
return build_from_crawler(
self.pqclass,
self.crawler,
downstream_queue_cls=self.mqclass,
key="",
start_queue_cls=self._smqclass,
)
except TypeError: # pragma: no cover
warn(
f"The __init__ method of {global_object_name(self.pqclass)} "
f"does not support a `start_queue_cls` keyword-only "
f"parameter.",
ScrapyDeprecationWarning,
)
return build_from_crawler(
self.pqclass,
self.crawler,
downstream_queue_cls=self.mqclass,
key="",
)
def _dq(self) -> ScrapyPriorityQueue:
assert self.crawler
assert self.dqdir
assert self.pqclass
state = self._read_dqs_state(self.dqdir)
try:
q = build_from_crawler(
self.pqclass,
self.crawler,
downstream_queue_cls=self.dqclass,
key=self.dqdir,
startprios=state,
start_queue_cls=self._sdqclass,
)
except TypeError: # pragma: no cover
warn(
f"The __init__ method of {global_object_name(self.pqclass)} "
f"does not support a `start_queue_cls` keyword-only "
f"parameter.",
ScrapyDeprecationWarning,
)
q = build_from_crawler(
self.pqclass,
self.crawler,
downstream_queue_cls=self.dqclass,
key=self.dqdir,
startprios=state,
)
if q:
logger.info(
"Resuming crawl (%(queuesize)d requests scheduled)",
{"queuesize": len(q)},
extra={"spider": self.spider},
)
return q
def _dqdir(self, jobdir: str | None) -> str | None:
if jobdir:
dqdir = Path(jobdir, "requests.queue")
if not dqdir.exists():
dqdir.mkdir(parents=True)
return str(dqdir)
return None
def _read_dqs_state(self, dqdir: str) -> Any:
path = Path(dqdir, "active.json")
if not path.exists():
return []
with path.open(encoding="utf-8") as f:
return json.load(f)
def _write_dqs_state(self, dqdir: str, state: Any) -> None:
with Path(dqdir, "active.json").open("w", encoding="utf-8") as f:
json.dump(state, f) | --- +++ @@ -34,6 +34,9 @@
class BaseSchedulerMeta(type):
+ """
+ Metaclass to check scheduler classes against the necessary interface
+ """
def __instancecheck__(cls, instance: Any) -> bool:
return cls.__subclasscheck__(type(instance))
@@ -50,29 +53,205 @@
class BaseScheduler(metaclass=BaseSchedulerMeta):
+ """The scheduler component is responsible for storing requests received
+ from the engine, and feeding them back upon request (also to the engine).
+
+ The original sources of said requests are:
+
+ * Spider: ``start`` method, requests created for URLs in the ``start_urls`` attribute, request callbacks
+ * Spider middleware: ``process_spider_output`` and ``process_spider_exception`` methods
+ * Downloader middleware: ``process_request``, ``process_response`` and ``process_exception`` methods
+
+ The order in which the scheduler returns its stored requests (via the ``next_request`` method)
+ plays a great part in determining the order in which those requests are downloaded. See :ref:`request-order`.
+
+ The methods defined in this class constitute the minimal interface that the Scrapy engine will interact with.
+ """
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
+ """
+ Factory method which receives the current :class:`~scrapy.crawler.Crawler` object as argument.
+ """
return cls()
def open(self, spider: Spider) -> Deferred[None] | None:
+ """
+ Called when the spider is opened by the engine. It receives the spider
+ instance as argument and it's useful to execute initialization code.
+
+ :param spider: the spider object for the current crawl
+ :type spider: :class:`~scrapy.spiders.Spider`
+ """
def close(self, reason: str) -> Deferred[None] | None:
+ """
+ Called when the spider is closed by the engine. It receives the reason why the crawl
+ finished as argument and it's useful to execute cleaning code.
+
+ :param reason: a string which describes the reason why the spider was closed
+ :type reason: :class:`str`
+ """
@abstractmethod
def has_pending_requests(self) -> bool:
+ """
+ ``True`` if the scheduler has enqueued requests, ``False`` otherwise
+ """
raise NotImplementedError
@abstractmethod
def enqueue_request(self, request: Request) -> bool:
+ """
+ Process a request received by the engine.
+
+ Return ``True`` if the request is stored correctly, ``False`` otherwise.
+
+ If ``False``, the engine will fire a ``request_dropped`` signal, and
+ will not make further attempts to schedule the request at a later time.
+ For reference, the default Scrapy scheduler returns ``False`` when the
+ request is rejected by the dupefilter.
+ """
raise NotImplementedError
@abstractmethod
def next_request(self) -> Request | None:
+ """
+ Return the next :class:`~scrapy.Request` to be processed, or ``None``
+ to indicate that there are no requests to be considered ready at the moment.
+
+ Returning ``None`` implies that no request from the scheduler will be sent
+ to the downloader in the current reactor cycle. The engine will continue
+ calling ``next_request`` until ``has_pending_requests`` is ``False``.
+ """
raise NotImplementedError
class Scheduler(BaseScheduler):
+ r"""Default scheduler.
+
+ Requests are stored into priority queues
+ (:setting:`SCHEDULER_PRIORITY_QUEUE`) that sort requests by
+ :attr:`~scrapy.http.Request.priority`.
+
+ By default, a single, memory-based priority queue is used for all requests.
+ When using :setting:`JOBDIR`, a disk-based priority queue is also created,
+ and only unserializable requests are stored in the memory-based priority
+ queue. For a given priority value, requests in memory take precedence over
+ requests in disk.
+
+ Each priority queue stores requests in separate internal queues, one per
+ priority value. The memory priority queue uses
+ :setting:`SCHEDULER_MEMORY_QUEUE` queues, while the disk priority queue
+ uses :setting:`SCHEDULER_DISK_QUEUE` queues. The internal queues determine
+ :ref:`request order <request-order>` when requests have the same priority.
+ :ref:`Start requests <start-requests>` are stored into separate internal
+ queues by default, and :ref:`ordered differently <start-request-order>`.
+
+ Duplicate requests are filtered out with an instance of
+ :setting:`DUPEFILTER_CLASS`.
+
+ .. _request-order:
+
+ Request order
+ =============
+
+ With default settings, pending requests are stored in a LIFO_ queue
+ (:ref:`except for start requests <start-request-order>`). As a result,
+ crawling happens in `DFO order`_, which is usually the most convenient
+ crawl order. However, you can enforce :ref:`BFO <bfo>` or :ref:`a custom
+ order <custom-request-order>` (:ref:`except for the first few requests
+ <concurrency-v-order>`).
+
+ .. _LIFO: https://en.wikipedia.org/wiki/Stack_(abstract_data_type)
+ .. _DFO order: https://en.wikipedia.org/wiki/Depth-first_search
+
+ .. _start-request-order:
+
+ Start request order
+ -------------------
+
+ :ref:`Start requests <start-requests>` are sent in the order they are
+ yielded from :meth:`~scrapy.Spider.start`, and given the same
+ :attr:`~scrapy.http.Request.priority`, other requests take precedence over
+ start requests.
+
+ You can set :setting:`SCHEDULER_START_MEMORY_QUEUE` and
+ :setting:`SCHEDULER_START_DISK_QUEUE` to ``None`` to handle start requests
+ the same as other requests when it comes to order and priority.
+
+
+ .. _bfo:
+
+ Crawling in BFO order
+ ---------------------
+
+ If you do want to crawl in `BFO order`_, you can do it by setting the
+ following :ref:`settings <topics-settings>`:
+
+ | :setting:`DEPTH_PRIORITY` = ``1``
+ | :setting:`SCHEDULER_DISK_QUEUE` =
+ ``"scrapy.squeues.PickleFifoDiskQueue"``
+ | :setting:`SCHEDULER_MEMORY_QUEUE` = ``"scrapy.squeues.FifoMemoryQueue"``
+
+ .. _BFO order: https://en.wikipedia.org/wiki/Breadth-first_search
+
+
+ .. _custom-request-order:
+
+ Crawling in a custom order
+ --------------------------
+
+ You can manually set :attr:`~scrapy.http.Request.priority` on requests to
+ force a specific request order.
+
+
+ .. _concurrency-v-order:
+
+ Concurrency affects order
+ -------------------------
+
+ While pending requests are below the configured values of
+ :setting:`CONCURRENT_REQUESTS`, :setting:`CONCURRENT_REQUESTS_PER_DOMAIN`
+ or :setting:`CONCURRENT_REQUESTS_PER_IP`, those requests are sent
+ concurrently.
+
+ As a result, the first few requests of a crawl may not follow the desired
+ order. Lowering those settings to ``1`` enforces the desired order except
+ for the very first request, but it significantly slows down the crawl as a
+ whole.
+
+ Job directory contents
+ ======================
+
+ .. warning:: The files that this class generates in the :ref:`job directory
+ <job-dir>` are an implementation detail, and may change without a
+ warning in a future version of Scrapy. Do not rely on the following
+ information for anything other than debugging purposes.
+
+ When using :setting:`JOBDIR`, this scheduler class:
+
+ - Creates a directory named ``requests.queue`` inside the :ref:`job
+ directory <job-dir>`, meant to keep track of all requests stored in
+ the scheduler (i.e. not downloaded yet).
+
+ - Generates inside that directory an ``active.json`` file with a JSON
+ representation of the state (``startprios``) of
+ :setting:`SCHEDULER_PRIORITY_QUEUE`.
+
+ The file is generated whenever the job stops (cleanly) and is loaded
+ when resuming the job.
+
+ - Instantiates the configured :setting:`SCHEDULER_PRIORITY_QUEUE` with
+ ``requests.queue/`` as persistence directory (*key*) and
+ :setting:`SCHEDULER_DISK_QUEUE` as *downstream_queue_cls*. The priority
+ queue may create additional files and directories inside that
+ directory, directly or though instances of
+ :setting:`SCHEDULER_DISK_QUEUE`.
+
+ This scheduler class also uses the configured :setting:`DUPEFILTER_CLASS`,
+ which may also write data inside the job directory.
+ """
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
@@ -99,6 +278,42 @@ pqclass: type[ScrapyPriorityQueue] | None = None,
crawler: Crawler | None = None,
):
+ """Initialize the scheduler.
+
+ :param dupefilter: An object responsible for checking and filtering duplicate requests.
+ The value for the :setting:`DUPEFILTER_CLASS` setting is used by default.
+ :type dupefilter: :class:`scrapy.dupefilters.BaseDupeFilter` instance or similar:
+ any class that implements the `BaseDupeFilter` interface
+
+ :param jobdir: The path of a directory to be used for persisting the crawl's state.
+ The value for the :setting:`JOBDIR` setting is used by default.
+ See :ref:`topics-jobs`.
+ :type jobdir: :class:`str` or ``None``
+
+ :param dqclass: A class to be used as persistent request queue.
+ The value for the :setting:`SCHEDULER_DISK_QUEUE` setting is used by default.
+ :type dqclass: class
+
+ :param mqclass: A class to be used as non-persistent request queue.
+ The value for the :setting:`SCHEDULER_MEMORY_QUEUE` setting is used by default.
+ :type mqclass: class
+
+ :param logunser: A boolean that indicates whether or not unserializable requests should be logged.
+ The value for the :setting:`SCHEDULER_DEBUG` setting is used by default.
+ :type logunser: bool
+
+ :param stats: A stats collector object to record stats about the request scheduling process.
+ The value for the :setting:`STATS_CLASS` setting is used by default.
+ :type stats: :class:`scrapy.statscollectors.StatsCollector` instance or similar:
+ any class that implements the `StatsCollector` interface
+
+ :param pqclass: A class to be used as priority queue for requests.
+ The value for the :setting:`SCHEDULER_PRIORITY_QUEUE` setting is used by default.
+ :type pqclass: class
+
+ :param crawler: The crawler object corresponding to the current crawl.
+ :type crawler: :class:`scrapy.crawler.Crawler`
+ """
self.df: BaseDupeFilter = dupefilter
self.dqdir: str | None = self._dqdir(jobdir)
self.pqclass: type[ScrapyPriorityQueue] | None = pqclass
@@ -128,12 +343,21 @@ return len(self) > 0
def open(self, spider: Spider) -> Deferred[None] | None:
+ """
+ (1) initialize the memory queue
+ (2) initialize the disk queue if the ``jobdir`` attribute is a valid directory
+ (3) return the result of the dupefilter's ``open`` method
+ """
self.spider: Spider = spider
self.mqs: ScrapyPriorityQueue = self._mq()
self.dqs: ScrapyPriorityQueue | None = self._dq() if self.dqdir else None
return self.df.open()
def close(self, reason: str) -> Deferred[None] | None:
+ """
+ (1) dump pending requests to disk if there is a disk queue
+ (2) return the result of the dupefilter's ``close`` method
+ """
if self.dqs is not None:
state = self.dqs.close()
assert isinstance(self.dqdir, str)
@@ -141,6 +365,15 @@ return self.df.close(reason)
def enqueue_request(self, request: Request) -> bool:
+ """
+ Unless the received request is filtered out by the Dupefilter, attempt to push
+ it into the disk queue, falling back to pushing it into the memory queue.
+
+ Increment the appropriate stats, such as: ``scheduler/enqueued``,
+ ``scheduler/enqueued/disk``, ``scheduler/enqueued/memory``.
+
+ Return ``True`` if the request was stored successfully, ``False`` otherwise.
+ """
if not request.dont_filter and self.df.request_seen(request):
self.df.log(request, self.spider)
return False
@@ -155,6 +388,14 @@ return True
def next_request(self) -> Request | None:
+ """
+ Return a :class:`~scrapy.Request` object from the memory queue,
+ falling back to the disk queue if the memory queue is empty.
+ Return ``None`` if there are no more enqueued requests.
+
+ Increment the appropriate stats, such as: ``scheduler/dequeued``,
+ ``scheduler/dequeued/disk``, ``scheduler/dequeued/memory``.
+ """
request: Request | None = self.mqs.pop()
assert self.stats is not None
if request is not None:
@@ -168,6 +409,9 @@ return request
def __len__(self) -> int:
+ """
+ Return the total amount of enqueued requests
+ """
return len(self.dqs) + len(self.mqs) if self.dqs is not None else len(self.mqs)
def _dqpush(self, request: Request) -> bool:
@@ -203,6 +447,7 @@ return None
def _mq(self) -> ScrapyPriorityQueue:
+ """Create a new priority queue instance, with in-memory storage"""
assert self.crawler
assert self.pqclass
try:
@@ -228,6 +473,7 @@ )
def _dq(self) -> ScrapyPriorityQueue:
+ """Create a new priority queue instance, with disk storage"""
assert self.crawler
assert self.dqdir
assert self.pqclass
@@ -264,6 +510,7 @@ return q
def _dqdir(self, jobdir: str | None) -> str | None:
+ """Return a folder name to keep disk queue state at"""
if jobdir:
dqdir = Path(jobdir, "requests.queue")
if not dqdir.exists():
@@ -280,4 +527,4 @@
def _write_dqs_state(self, dqdir: str, state: Any) -> None:
with Path(dqdir, "active.json").open("w", encoding="utf-8") as f:
- json.dump(state, f)+ json.dump(state, f)
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/core/scheduler.py |
Generate docstrings for this script |
from __future__ import annotations
from threading import Thread
from typing import TYPE_CHECKING, Any
from scrapy.commands import ScrapyCommand
from scrapy.http import Request
from scrapy.shell import Shell
from scrapy.utils.defer import _schedule_coro
from scrapy.utils.spider import DefaultSpider, spidercls_for_request
from scrapy.utils.url import guess_scheme
if TYPE_CHECKING:
from argparse import ArgumentParser, Namespace
from scrapy import Spider
class Command(ScrapyCommand):
default_settings = {
"DUPEFILTER_CLASS": "scrapy.dupefilters.BaseDupeFilter",
"KEEP_ALIVE": True,
"LOGSTATS_INTERVAL": 0,
}
def syntax(self) -> str:
return "[url|file]"
def short_desc(self) -> str:
return "Interactive scraping console"
def long_desc(self) -> str:
return (
"Interactive console for scraping the given url or file. "
"Use ./file.html syntax or full path for local file."
)
def add_options(self, parser: ArgumentParser) -> None:
super().add_options(parser)
parser.add_argument(
"-c",
dest="code",
help="evaluate the code in the shell, print the result and exit",
)
parser.add_argument("--spider", dest="spider", help="use this spider")
parser.add_argument(
"--no-redirect",
dest="no_redirect",
action="store_true",
default=False,
help="do not handle HTTP 3xx status codes and print response as-is",
)
def update_vars(self, vars: dict[str, Any]) -> None: # noqa: A002
def run(self, args: list[str], opts: Namespace) -> None:
url = args[0] if args else None
if url:
# first argument may be a local file
url = guess_scheme(url)
assert self.crawler_process
spider_loader = self.crawler_process.spider_loader
spidercls: type[Spider] = DefaultSpider
if opts.spider:
spidercls = spider_loader.load(opts.spider)
elif url:
spidercls = spidercls_for_request(
spider_loader, Request(url), spidercls, log_multiple=True
)
# The crawler is created this way since the Shell manually handles the
# crawling engine, so the set up in the crawl method won't work
crawler = self.crawler_process._create_crawler(spidercls)
crawler._apply_settings()
# The Shell class needs a persistent engine in the crawler
crawler.engine = crawler._create_engine()
_schedule_coro(crawler.engine.start_async(_start_request_processing=False))
self._start_crawler_thread()
shell = Shell(crawler, update_vars=self.update_vars, code=opts.code)
shell.start(url=url, redirect=not opts.no_redirect)
def _start_crawler_thread(self) -> None:
assert self.crawler_process
t = Thread(
target=self.crawler_process.start,
kwargs={"stop_after_crawl": False, "install_signal_handlers": False},
)
t.daemon = True
t.start() | --- +++ @@ -1,3 +1,8 @@+"""
+Scrapy Shell
+
+See documentation in docs/topics/shell.rst
+"""
from __future__ import annotations
@@ -53,6 +58,9 @@ )
def update_vars(self, vars: dict[str, Any]) -> None: # noqa: A002
+ """You can use this function to update the Scrapy objects that will be
+ available in the shell
+ """
def run(self, args: list[str], opts: Namespace) -> None:
url = args[0] if args else None
@@ -91,4 +99,4 @@ kwargs={"stop_after_crawl": False, "install_signal_handlers": False},
)
t.daemon = True
- t.start()+ t.start()
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/commands/shell.py |
Document classes and their methods | from __future__ import annotations
import gzip
import logging
import pickle
from email.utils import mktime_tz, parsedate_tz
from importlib import import_module
from pathlib import Path
from time import time
from typing import IO, TYPE_CHECKING, Any, Concatenate, cast
from weakref import WeakKeyDictionary
from w3lib.http import headers_dict_to_raw, headers_raw_to_dict
from scrapy.http import Headers, Response
from scrapy.responsetypes import responsetypes
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.project import data_path
from scrapy.utils.python import to_bytes, to_unicode
if TYPE_CHECKING:
import os
from collections.abc import Callable
from types import ModuleType
from scrapy.http.request import Request
from scrapy.settings import BaseSettings
from scrapy.spiders import Spider
from scrapy.utils.request import RequestFingerprinterProtocol
# Module-level logger used by the cache storage backends for diagnostics.
logger = logging.getLogger(__name__)
class DummyPolicy:
    """Naive cache policy.

    Every request is cached and served from cache, except requests whose URL
    scheme appears in :setting:`HTTPCACHE_IGNORE_SCHEMES` and responses whose
    HTTP status code appears in :setting:`HTTPCACHE_IGNORE_HTTP_CODES`.
    Cached responses are always considered fresh and valid, so no
    revalidation requests are ever issued.
    """

    def __init__(self, settings: BaseSettings):
        # URL schemes (e.g. "file") whose requests are never cached.
        self.ignore_schemes: list[str] = settings.getlist("HTTPCACHE_IGNORE_SCHEMES")
        # HTTP status codes whose responses are never cached.
        self.ignore_http_codes: list[int] = [
            int(code) for code in settings.getlist("HTTPCACHE_IGNORE_HTTP_CODES")
        ]

    def should_cache_request(self, request: Request) -> bool:
        """Cache every request whose URL scheme is not ignored."""
        scheme = urlparse_cached(request).scheme
        return scheme not in self.ignore_schemes

    def should_cache_response(self, response: Response, request: Request) -> bool:
        """Cache every response whose status code is not ignored."""
        return response.status not in self.ignore_http_codes

    def is_cached_response_fresh(self, cachedresponse: Response, request: Request) -> bool:
        """A cached response is always considered fresh."""
        return True

    def is_cached_response_valid(self, cachedresponse: Response, response: Response, request: Request) -> bool:
        """A cached response is always considered valid."""
        return True
class RFC2616Policy:
    """Cache policy that aims to follow RFC 2616 (HTTP/1.1) caching rules.

    It honors ``Cache-Control`` request/response directives, computes a
    response's freshness lifetime from ``max-age`` / ``Expires`` /
    ``Last-Modified``, and attaches conditional validators
    (``If-Modified-Since`` / ``If-None-Match``) to requests whose cached
    response has gone stale.

    Configured via the :setting:`HTTPCACHE_ALWAYS_STORE`,
    :setting:`HTTPCACHE_IGNORE_SCHEMES` and
    :setting:`HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS` settings.
    """

    # Fallback freshness lifetime (one year) for responses that may be
    # cached indefinitely (status 300/301/308), mirroring Firefox behavior.
    MAXAGE = 3600 * 24 * 365  # one year

    def __init__(self, settings: BaseSettings):
        # If set, store cacheable responses even when they carry no explicit
        # expiration info or validators.
        self.always_store: bool = settings.getbool("HTTPCACHE_ALWAYS_STORE")
        # URL schemes whose requests are never cached.
        self.ignore_schemes: list[str] = settings.getlist("HTTPCACHE_IGNORE_SCHEMES")
        # Memo of parsed Cache-Control headers, keyed weakly per
        # request/response object so entries die with the object.
        self._cc_parsed: WeakKeyDictionary[
            Request | Response, dict[bytes, bytes | None]
        ] = WeakKeyDictionary()
        # Response Cache-Control directives to strip before making decisions.
        self.ignore_response_cache_controls: list[bytes] = [
            to_bytes(cc)
            for cc in settings.getlist("HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS")
        ]

    def _parse_cachecontrol(self, r: Request | Response) -> dict[bytes, bytes | None]:
        """Return the parsed ``Cache-Control`` header of *r*, memoized.

        For responses, directives listed in
        ``ignore_response_cache_controls`` are removed before caching the
        parse result.
        """
        if r not in self._cc_parsed:
            cch = r.headers.get(b"Cache-Control", b"")
            assert cch is not None
            parsed = parse_cachecontrol(cch)
            if isinstance(r, Response):
                for key in self.ignore_response_cache_controls:
                    parsed.pop(key, None)
            self._cc_parsed[r] = parsed
        return self._cc_parsed[r]

    def should_cache_request(self, request: Request) -> bool:
        """Return ``True`` if *request* may interact with the cache at all."""
        if urlparse_cached(request).scheme in self.ignore_schemes:
            return False
        cc = self._parse_cachecontrol(request)
        # obey user-agent directive "Cache-Control: no-store"
        return b"no-store" not in cc

    def should_cache_response(self, response: Response, request: Request) -> bool:
        """Return ``True`` if *response* is eligible for storage in the cache."""
        # What is cacheable - https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.1
        # Response cacheability - https://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.4
        # Status code 206 is not included because cache can not deal with partial contents
        cc = self._parse_cachecontrol(response)
        # obey directive "Cache-Control: no-store"
        if b"no-store" in cc:
            return False
        # Never cache 304 (Not Modified) responses
        if response.status == 304:
            return False
        # Cache unconditionally if configured to do so
        if self.always_store:
            return True
        # Any hint on response expiration is good
        if b"max-age" in cc or b"Expires" in response.headers:
            return True
        # Firefox fallbacks this statuses to one year expiration if none is set
        if response.status in (300, 301, 308):
            return True
        # Other statuses without expiration requires at least one validator
        if response.status in (200, 203, 401):
            return b"Last-Modified" in response.headers or b"ETag" in response.headers
        # Any other is probably not eligible for caching
        # Makes no sense to cache responses that does not contain expiration
        # info and can not be revalidated
        return False

    def is_cached_response_fresh(
        self, cachedresponse: Response, request: Request
    ) -> bool:
        """Return ``True`` if *cachedresponse* is fresh enough to be served
        without revalidation.

        When stale, conditional validators are copied onto *request* (see
        ``_set_conditional_validators``) and ``False`` is returned.
        """
        cc = self._parse_cachecontrol(cachedresponse)
        ccreq = self._parse_cachecontrol(request)
        if b"no-cache" in cc or b"no-cache" in ccreq:
            return False

        now = time()
        freshnesslifetime = self._compute_freshness_lifetime(
            cachedresponse, request, now
        )
        currentage = self._compute_current_age(cachedresponse, request, now)

        # A request max-age directive further limits the freshness lifetime.
        reqmaxage = self._get_max_age(ccreq)
        if reqmaxage is not None:
            freshnesslifetime = min(freshnesslifetime, reqmaxage)

        if currentage < freshnesslifetime:
            return True

        if b"max-stale" in ccreq and b"must-revalidate" not in cc:
            # From RFC2616: "Indicates that the client is willing to
            # accept a response that has exceeded its expiration time.
            # If max-stale is assigned a value, then the client is
            # willing to accept a response that has exceeded its
            # expiration time by no more than the specified number of
            # seconds. If no value is assigned to max-stale, then the
            # client is willing to accept a stale response of any age."
            staleage = ccreq[b"max-stale"]
            if staleage is None:
                return True

            try:
                if currentage < freshnesslifetime + max(0, int(staleage)):
                    return True
            except ValueError:
                pass

        # Cached response is stale, try to set validators if any
        self._set_conditional_validators(request, cachedresponse)
        return False

    def is_cached_response_valid(
        self, cachedresponse: Response, response: Response, request: Request
    ) -> bool:
        """Return ``True`` if the revalidation *response* confirms that
        *cachedresponse* may still be served.
        """
        # Use the cached response if the new response is a server error,
        # as long as the old response didn't specify must-revalidate.
        if response.status >= 500:
            cc = self._parse_cachecontrol(cachedresponse)
            if b"must-revalidate" not in cc:
                return True

        # Use the cached response if the server says it hasn't changed.
        return response.status == 304

    def _set_conditional_validators(
        self, request: Request, cachedresponse: Response
    ) -> None:
        """Copy the cached response's validators onto *request* as
        conditional headers for revalidation.
        """
        if b"Last-Modified" in cachedresponse.headers:
            request.headers[b"If-Modified-Since"] = cachedresponse.headers[
                b"Last-Modified"
            ]

        if b"ETag" in cachedresponse.headers:
            request.headers[b"If-None-Match"] = cachedresponse.headers[b"ETag"]

    def _get_max_age(self, cc: dict[bytes, bytes | None]) -> int | None:
        """Return the non-negative ``max-age`` directive value from *cc*,
        or ``None`` when missing or unparsable.
        """
        try:
            return max(0, int(cc[b"max-age"]))  # type: ignore[arg-type]
        except (KeyError, ValueError):
            return None

    def _compute_freshness_lifetime(
        self, response: Response, request: Request, now: float
    ) -> float:
        """Return for how many seconds (counted from its ``Date``) the
        response may be considered fresh, following Firefox's heuristics.
        """
        # Reference nsHttpResponseHead::ComputeFreshnessLifetime
        # https://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#706
        cc = self._parse_cachecontrol(response)
        maxage = self._get_max_age(cc)
        if maxage is not None:
            return maxage

        # Parse date header or synthesize it if none exists
        date = rfc1123_to_epoch(response.headers.get(b"Date")) or now

        # Try HTTP/1.0 Expires header
        if b"Expires" in response.headers:
            expires = rfc1123_to_epoch(response.headers[b"Expires"])
            # When parsing Expires header fails RFC 2616 section 14.21 says we
            # should treat this as an expiration time in the past.
            return max(0, expires - date) if expires else 0

        # Fallback to heuristic using last-modified header
        # This is not in RFC but on Firefox caching implementation
        lastmodified = rfc1123_to_epoch(response.headers.get(b"Last-Modified"))
        if lastmodified and lastmodified <= date:
            return (date - lastmodified) / 10

        # This request can be cached indefinitely
        if response.status in (300, 301, 308):
            return self.MAXAGE

        # Insufficient information to compute freshness lifetime
        return 0

    def _compute_current_age(
        self, response: Response, request: Request, now: float
    ) -> float:
        """Return the response's current age in seconds, derived from its
        ``Date`` and ``Age`` headers.
        """
        # Reference nsHttpResponseHead::ComputeCurrentAge
        # https://dxr.mozilla.org/mozilla-central/source/netwerk/protocol/http/nsHttpResponseHead.cpp#658
        currentage: float = 0
        # If Date header is not set we assume it is a fast connection, and
        # clock is in sync with the server
        date = rfc1123_to_epoch(response.headers.get(b"Date")) or now
        if now > date:
            currentage = now - date

        if b"Age" in response.headers:
            try:
                age = int(response.headers[b"Age"])  # type: ignore[arg-type]
                currentage = max(currentage, age)
            except ValueError:
                pass

        return currentage
class DbmCacheStorage:
def __init__(self, settings: BaseSettings):
self.cachedir: str = data_path(settings["HTTPCACHE_DIR"], createdir=True)
self.expiration_secs: int = settings.getint("HTTPCACHE_EXPIRATION_SECS")
self.dbmodule: ModuleType = import_module(settings["HTTPCACHE_DBM_MODULE"])
self.db: Any = None # the real type is private
def open_spider(self, spider: Spider) -> None:
dbpath = Path(self.cachedir, f"{spider.name}.db")
self.db = self.dbmodule.open(str(dbpath), "c")
logger.debug(
"Using DBM cache storage in %(cachepath)s",
{"cachepath": dbpath},
extra={"spider": spider},
)
assert spider.crawler.request_fingerprinter
self._fingerprinter: RequestFingerprinterProtocol = (
spider.crawler.request_fingerprinter
)
def close_spider(self, spider: Spider) -> None:
self.db.close()
def retrieve_response(self, spider: Spider, request: Request) -> Response | None:
data = self._read_data(spider, request)
if data is None:
return None # not cached
url = data["url"]
status = data["status"]
headers = Headers(data["headers"])
body = data["body"]
respcls = responsetypes.from_args(headers=headers, url=url, body=body)
return respcls(url=url, headers=headers, status=status, body=body)
def store_response(
self, spider: Spider, request: Request, response: Response
) -> None:
key = self._fingerprinter.fingerprint(request).hex()
data = {
"status": response.status,
"url": response.url,
"headers": dict(response.headers),
"body": response.body,
}
self.db[f"{key}_data"] = pickle.dumps(data, protocol=4)
self.db[f"{key}_time"] = str(time())
def _read_data(self, spider: Spider, request: Request) -> dict[str, Any] | None:
key = self._fingerprinter.fingerprint(request).hex()
db = self.db
tkey = f"{key}_time"
if tkey not in db:
return None # not found
ts = db[tkey]
if 0 < self.expiration_secs < time() - float(ts):
return None # expired
return cast("dict[str, Any]", pickle.loads(db[f"{key}_data"])) # noqa: S301
class FilesystemCacheStorage:
def __init__(self, settings: BaseSettings):
self.cachedir: str = data_path(settings["HTTPCACHE_DIR"])
self.expiration_secs: int = settings.getint("HTTPCACHE_EXPIRATION_SECS")
self.use_gzip: bool = settings.getbool("HTTPCACHE_GZIP")
# https://github.com/python/mypy/issues/10740
self._open: Callable[Concatenate[str | os.PathLike, str, ...], IO[bytes]] = (
gzip.open if self.use_gzip else open # type: ignore[assignment]
)
def open_spider(self, spider: Spider) -> None:
logger.debug(
"Using filesystem cache storage in %(cachedir)s",
{"cachedir": self.cachedir},
extra={"spider": spider},
)
assert spider.crawler.request_fingerprinter
self._fingerprinter = spider.crawler.request_fingerprinter
def close_spider(self, spider: Spider) -> None:
pass
def retrieve_response(self, spider: Spider, request: Request) -> Response | None:
metadata = self._read_meta(spider, request)
if metadata is None:
return None # not cached
rpath = Path(self._get_request_path(spider, request))
with self._open(rpath / "response_body", "rb") as f:
body = f.read()
with self._open(rpath / "response_headers", "rb") as f:
rawheaders = f.read()
url = metadata["response_url"]
status = metadata["status"]
headers = Headers(headers_raw_to_dict(rawheaders))
respcls = responsetypes.from_args(headers=headers, url=url, body=body)
return respcls(url=url, headers=headers, status=status, body=body)
def store_response(
self, spider: Spider, request: Request, response: Response
) -> None:
rpath = Path(self._get_request_path(spider, request))
if not rpath.exists():
rpath.mkdir(parents=True)
metadata = {
"url": request.url,
"method": request.method,
"status": response.status,
"response_url": response.url,
"timestamp": time(),
}
with self._open(rpath / "meta", "wb") as f:
f.write(to_bytes(repr(metadata)))
with self._open(rpath / "pickled_meta", "wb") as f:
pickle.dump(metadata, f, protocol=4)
with self._open(rpath / "response_headers", "wb") as f:
f.write(headers_dict_to_raw(response.headers))
with self._open(rpath / "response_body", "wb") as f:
f.write(response.body)
with self._open(rpath / "request_headers", "wb") as f:
f.write(headers_dict_to_raw(request.headers))
with self._open(rpath / "request_body", "wb") as f:
f.write(request.body)
def _get_request_path(self, spider: Spider, request: Request) -> str:
key = self._fingerprinter.fingerprint(request).hex()
return str(Path(self.cachedir, spider.name, key[0:2], key))
def _read_meta(self, spider: Spider, request: Request) -> dict[str, Any] | None:
rpath = Path(self._get_request_path(spider, request))
metapath = rpath / "pickled_meta"
if not metapath.exists():
return None # not found
mtime = metapath.stat().st_mtime
if 0 < self.expiration_secs < time() - mtime:
return None # expired
with self._open(metapath, "rb") as f:
return cast("dict[str, Any]", pickle.load(f)) # noqa: S301
def parse_cachecontrol(header: bytes) -> dict[bytes, bytes | None]:
directives = {}
for directive in header.split(b","):
key, sep, val = directive.strip().partition(b"=")
if key:
directives[key.lower()] = val if sep else None
return directives
def rfc1123_to_epoch(date_str: str | bytes | None) -> int | None:
try:
date_str = to_unicode(date_str, encoding="ascii") # type: ignore[arg-type]
return mktime_tz(parsedate_tz(date_str)) # type: ignore[arg-type]
except Exception:
return None | --- +++ @@ -331,6 +331,7 @@ pass
def retrieve_response(self, spider: Spider, request: Request) -> Response | None:
+ """Return response if present in cache, or None otherwise."""
metadata = self._read_meta(spider, request)
if metadata is None:
return None # not cached
@@ -348,6 +349,7 @@ def store_response(
self, spider: Spider, request: Request, response: Response
) -> None:
+ """Store the given response in the cache."""
rpath = Path(self._get_request_path(spider, request))
if not rpath.exists():
rpath.mkdir(parents=True)
@@ -388,6 +390,17 @@
def parse_cachecontrol(header: bytes) -> dict[bytes, bytes | None]:
+ """Parse Cache-Control header
+
+ https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9
+
+ >>> parse_cachecontrol(b'public, max-age=3600') == {b'public': None,
+ ... b'max-age': b'3600'}
+ True
+ >>> parse_cachecontrol(b'') == {}
+ True
+
+ """
directives = {}
for directive in header.split(b","):
key, sep, val = directive.strip().partition(b"=")
@@ -401,4 +414,4 @@ date_str = to_unicode(date_str, encoding="ascii") # type: ignore[arg-type]
return mktime_tz(parsedate_tz(date_str)) # type: ignore[arg-type]
except Exception:
- return None+ return None
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/extensions/httpcache.py |
Add inline docstrings for readability | from __future__ import annotations
import logging
from collections import defaultdict
from typing import TYPE_CHECKING, Any
from tldextract import TLDExtract
from scrapy.exceptions import NotConfigured
from scrapy.http import Response
from scrapy.http.cookies import CookieJar
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_unicode
if TYPE_CHECKING:
from collections.abc import Iterable, Sequence
from http.cookiejar import Cookie
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy import Request, Spider
from scrapy.crawler import Crawler
from scrapy.http.request import VerboseCookie
logger = logging.getLogger(__name__)
_split_domain = TLDExtract(include_psl_private_domains=True)
_UNSET = object()
def _is_public_domain(domain: str) -> bool:
parts = _split_domain(domain)
return not parts.domain
class CookiesMiddleware:
crawler: Crawler
def __init__(self, debug: bool = False):
self.jars: defaultdict[Any, CookieJar] = defaultdict(CookieJar)
self.debug: bool = debug
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
if not crawler.settings.getbool("COOKIES_ENABLED"):
raise NotConfigured
o = cls(crawler.settings.getbool("COOKIES_DEBUG"))
o.crawler = crawler
return o
def _process_cookies(
self, cookies: Iterable[Cookie], *, jar: CookieJar, request: Request
) -> None:
for cookie in cookies:
cookie_domain = cookie.domain
cookie_domain = cookie_domain.removeprefix(".")
hostname = urlparse_cached(request).hostname
assert hostname is not None
request_domain = hostname.lower()
if cookie_domain and _is_public_domain(cookie_domain):
if cookie_domain != request_domain:
continue
cookie.domain = request_domain
jar.set_cookie_if_ok(cookie, request)
@_warn_spider_arg
def process_request(
self, request: Request, spider: Spider | None = None
) -> Request | Response | None:
if request.meta.get("dont_merge_cookies", False):
return None
cookiejarkey = request.meta.get("cookiejar")
jar = self.jars[cookiejarkey]
cookies = self._get_request_cookies(jar, request)
self._process_cookies(cookies, jar=jar, request=request)
# set Cookie header
request.headers.pop("Cookie", None)
jar.add_cookie_header(request)
self._debug_cookie(request)
return None
@_warn_spider_arg
def process_response(
self, request: Request, response: Response, spider: Spider | None = None
) -> Request | Response:
if request.meta.get("dont_merge_cookies", False):
return response
# extract cookies from Set-Cookie and drop invalid/expired cookies
cookiejarkey = request.meta.get("cookiejar")
jar = self.jars[cookiejarkey]
cookies = jar.make_cookies(response, request)
self._process_cookies(cookies, jar=jar, request=request)
self._debug_set_cookie(response)
return response
def _debug_cookie(self, request: Request) -> None:
if self.debug:
cl = [
to_unicode(c, errors="replace")
for c in request.headers.getlist("Cookie")
]
if cl:
cookies = "\n".join(f"Cookie: {c}\n" for c in cl)
msg = f"Sending cookies to: {request}\n{cookies}"
logger.debug(msg, extra={"spider": self.crawler.spider})
def _debug_set_cookie(self, response: Response) -> None:
if self.debug:
cl = [
to_unicode(c, errors="replace")
for c in response.headers.getlist("Set-Cookie")
]
if cl:
cookies = "\n".join(f"Set-Cookie: {c}\n" for c in cl)
msg = f"Received cookies from: {response}\n{cookies}"
logger.debug(msg, extra={"spider": self.crawler.spider})
def _format_cookie(self, cookie: VerboseCookie, request: Request) -> str | None:
decoded = {}
flags = set()
for key in ("name", "value", "path", "domain"):
value = cookie.get(key)
if value is None:
if key in ("name", "value"):
msg = f"Invalid cookie found in request {request}: {cookie} ('{key}' is missing)"
logger.warning(msg)
return None
continue
if isinstance(value, (bool, float, int, str)):
decoded[key] = str(value)
else:
assert isinstance(value, bytes)
try:
decoded[key] = value.decode("utf8")
except UnicodeDecodeError:
logger.warning(
"Non UTF-8 encoded cookie found in request %s: %s",
request,
cookie,
)
decoded[key] = value.decode("latin1", errors="replace")
for flag in ("secure",):
value = cookie.get(flag, _UNSET)
if value is _UNSET or not value:
continue
flags.add(flag)
cookie_str = f"{decoded.pop('name')}={decoded.pop('value')}"
for key, value in decoded.items(): # path, domain
cookie_str += f"; {key.capitalize()}={value}"
for flag in flags: # secure
cookie_str += f"; {flag.capitalize()}"
return cookie_str
def _get_request_cookies(
self, jar: CookieJar, request: Request
) -> Sequence[Cookie]:
if not request.cookies:
return []
cookies: Iterable[VerboseCookie]
if isinstance(request.cookies, dict):
cookies = tuple({"name": k, "value": v} for k, v in request.cookies.items())
else:
cookies = request.cookies
for cookie in cookies:
cookie.setdefault("secure", urlparse_cached(request).scheme == "https")
formatted = filter(None, (self._format_cookie(c, request) for c in cookies))
response = Response(request.url, headers={"Set-Cookie": formatted})
return jar.make_cookies(response, request) | --- +++ @@ -38,6 +38,7 @@
class CookiesMiddleware:
+ """This middleware enables working with sites that need cookies"""
crawler: Crawler
@@ -129,6 +130,10 @@ logger.debug(msg, extra={"spider": self.crawler.spider})
def _format_cookie(self, cookie: VerboseCookie, request: Request) -> str | None:
+ """
+ Given a dict consisting of cookie components, return its string representation.
+ Decode from bytes if necessary.
+ """
decoded = {}
flags = set()
for key in ("name", "value", "path", "domain"):
@@ -167,6 +172,9 @@ def _get_request_cookies(
self, jar: CookieJar, request: Request
) -> Sequence[Cookie]:
+ """
+ Extract cookies from the Request.cookies attribute
+ """
if not request.cookies:
return []
cookies: Iterable[VerboseCookie]
@@ -178,4 +186,4 @@ cookie.setdefault("secure", urlparse_cached(request).scheme == "https")
formatted = filter(None, (self._format_cookie(c, request) for c in cookies))
response = Response(request.url, headers={"Set-Cookie": formatted})
- return jar.make_cookies(response, request)+ return jar.make_cookies(response, request)
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/downloadermiddlewares/cookies.py |
Generate docstrings for script automation |
from __future__ import annotations
from typing import TYPE_CHECKING
from w3lib.http import basic_auth_header
from scrapy import Request, Spider, signals
from scrapy.utils.decorators import _warn_spider_arg
from scrapy.utils.url import url_is_from_any_domain
if TYPE_CHECKING:
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.http import Response
class HttpAuthMiddleware:
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
o = cls()
crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
return o
def spider_opened(self, spider: Spider) -> None:
usr = getattr(spider, "http_user", "")
pwd = getattr(spider, "http_pass", "")
if usr or pwd:
self.auth = basic_auth_header(usr, pwd)
self.domain = spider.http_auth_domain # type: ignore[attr-defined]
@_warn_spider_arg
def process_request(
self, request: Request, spider: Spider | None = None
) -> Request | Response | None:
auth = getattr(self, "auth", None)
if (
auth
and b"Authorization" not in request.headers
and (not self.domain or url_is_from_any_domain(request.url, [self.domain]))
):
request.headers[b"Authorization"] = auth
return None | --- +++ @@ -1,3 +1,8 @@+"""
+HTTP basic auth downloader middleware
+
+See documentation in docs/topics/downloader-middleware.rst
+"""
from __future__ import annotations
@@ -18,6 +23,8 @@
class HttpAuthMiddleware:
+ """Set Basic HTTP Authorization header
+ (http_user and http_pass spider class attributes)"""
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
@@ -43,4 +50,4 @@ and (not self.domain or url_is_from_any_domain(request.url, [self.domain]))
):
request.headers[b"Authorization"] = auth
- return None+ return None
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/downloadermiddlewares/httpauth.py |
Generate NumPy-style docstrings | from __future__ import annotations
from collections.abc import Mapping
from typing import TYPE_CHECKING, Any, AnyStr, TypeAlias, cast
from w3lib.http import headers_dict_to_raw
from scrapy.utils.datatypes import CaseInsensitiveDict, CaselessDict
from scrapy.utils.python import to_unicode
if TYPE_CHECKING:
from collections.abc import Iterable
# typing.Self requires Python 3.11
from typing_extensions import Self
_RawValue: TypeAlias = bytes | str | int
# isn't fully compatible typing-wise with either dict or CaselessDict,
# but it needs refactoring anyway, see also https://github.com/scrapy/scrapy/pull/5146
class Headers(CaselessDict):
def __init__(
self,
seq: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None = None,
encoding: str = "utf-8",
):
self.encoding: str = encoding
super().__init__(seq)
def update( # type: ignore[override]
self, seq: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]]
) -> None:
seq = seq.items() if isinstance(seq, Mapping) else seq
iseq: dict[bytes, list[bytes]] = {}
for k, v in seq:
iseq.setdefault(self.normkey(k), []).extend(self.normvalue(v))
super().update(iseq)
def normkey(self, key: AnyStr) -> bytes: # type: ignore[override]
return self._tobytes(key.title())
def normvalue(self, value: _RawValue | Iterable[_RawValue]) -> list[bytes]:
_value: Iterable[_RawValue]
if value is None:
_value = []
elif isinstance(value, (str, bytes)):
_value = [value]
elif hasattr(value, "__iter__"):
_value = value
else:
_value = [value]
return [self._tobytes(x) for x in _value]
def _tobytes(self, x: _RawValue) -> bytes:
if isinstance(x, bytes):
return x
if isinstance(x, str):
return x.encode(self.encoding)
if isinstance(x, int):
return str(x).encode(self.encoding)
raise TypeError(f"Unsupported value type: {type(x)}")
def __getitem__(self, key: AnyStr) -> bytes | None:
try:
return cast("list[bytes]", super().__getitem__(key))[-1]
except IndexError:
return None
def get(self, key: AnyStr, def_val: Any = None) -> bytes | None:
try:
return cast("list[bytes]", super().get(key, def_val))[-1]
except IndexError:
return None
def getlist(self, key: AnyStr, def_val: Any = None) -> list[bytes]:
try:
return cast("list[bytes]", super().__getitem__(key))
except KeyError:
if def_val is not None:
return self.normvalue(def_val)
return []
def setlist(self, key: AnyStr, list_: Iterable[_RawValue]) -> None:
self[key] = list_
def setlistdefault(
self, key: AnyStr, default_list: Iterable[_RawValue] = ()
) -> Any:
return self.setdefault(key, default_list)
def appendlist(self, key: AnyStr, value: Iterable[_RawValue]) -> None:
lst = self.getlist(key)
lst.extend(self.normvalue(value))
self[key] = lst
def items(self) -> Iterable[tuple[bytes, list[bytes]]]: # type: ignore[override]
return ((k, self.getlist(k)) for k in self.keys())
def values(self) -> list[bytes | None]: # type: ignore[override]
return [
self[k]
for k in self.keys() # pylint: disable=consider-using-dict-items
]
def to_string(self) -> bytes:
return headers_dict_to_raw(self)
def to_unicode_dict(self) -> CaseInsensitiveDict:
return CaseInsensitiveDict(
(
to_unicode(key, encoding=self.encoding),
to_unicode(b",".join(value), encoding=self.encoding),
)
for key, value in self.items()
)
def to_tuple_list(self) -> list[tuple[str, str]]:
return [
(key.decode(self.encoding), value.decode(self.encoding))
for key, values in self.items()
for value in values
]
def __copy__(self) -> Self:
return self.__class__(self)
copy = __copy__ | --- +++ @@ -21,6 +21,7 @@ # isn't fully compatible typing-wise with either dict or CaselessDict,
# but it needs refactoring anyway, see also https://github.com/scrapy/scrapy/pull/5146
class Headers(CaselessDict):
+ """Case insensitive http headers dictionary"""
def __init__(
self,
@@ -40,9 +41,11 @@ super().update(iseq)
def normkey(self, key: AnyStr) -> bytes: # type: ignore[override]
+ """Normalize key to bytes"""
return self._tobytes(key.title())
def normvalue(self, value: _RawValue | Iterable[_RawValue]) -> list[bytes]:
+ """Normalize values to bytes"""
_value: Iterable[_RawValue]
if value is None:
_value = []
@@ -110,6 +113,9 @@ return headers_dict_to_raw(self)
def to_unicode_dict(self) -> CaseInsensitiveDict:
+ """Return headers as a CaseInsensitiveDict with str keys
+ and str values. Multiple values are joined with ','.
+ """
return CaseInsensitiveDict(
(
to_unicode(key, encoding=self.encoding),
@@ -119,6 +125,10 @@ )
def to_tuple_list(self) -> list[tuple[str, str]]:
+ """Return headers as a list of ``(key, value)`` tuples.
+
+ Multiple values are represented as multiple tuples with the same key.
+ """
return [
(key.decode(self.encoding), value.decode(self.encoding))
for key, values in self.items()
@@ -128,4 +138,4 @@ def __copy__(self) -> Self:
return self.__class__(self)
- copy = __copy__+ copy = __copy__
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/http/headers.py |
Add docstrings for utility scripts | from __future__ import annotations
import logging
from pathlib import Path
from typing import TYPE_CHECKING
from warnings import warn
from scrapy.exceptions import ScrapyDeprecationWarning
from scrapy.utils.job import job_dir
from scrapy.utils.request import (
RequestFingerprinter,
RequestFingerprinterProtocol,
referer_str,
)
if TYPE_CHECKING:
from twisted.internet.defer import Deferred
# typing.Self requires Python 3.11
from typing_extensions import Self
from scrapy.crawler import Crawler
from scrapy.http.request import Request
from scrapy.spiders import Spider
class BaseDupeFilter:
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
return cls()
def request_seen(self, request: Request) -> bool:
return False
def open(self) -> Deferred[None] | None:
pass
def close(self, reason: str) -> Deferred[None] | None:
pass
def log(self, request: Request, spider: Spider) -> None:
warn(
"Calling BaseDupeFilter.log() is deprecated.",
ScrapyDeprecationWarning,
stacklevel=2,
)
class RFPDupeFilter(BaseDupeFilter):
def __init__(
self,
path: str | None = None,
debug: bool = False,
*,
fingerprinter: RequestFingerprinterProtocol | None = None,
) -> None:
self.file = None
self.fingerprinter: RequestFingerprinterProtocol = (
fingerprinter or RequestFingerprinter()
)
self.fingerprints: set[str] = set()
self.logdupes = True
self.debug = debug
self.logger = logging.getLogger(__name__)
if path:
# line-by-line writing, see: https://github.com/scrapy/scrapy/issues/6019
self.file = Path(path, "requests.seen").open(
"a+", buffering=1, encoding="utf-8"
)
self.file.reconfigure(write_through=True)
self.file.seek(0)
self.fingerprints.update(x.rstrip() for x in self.file)
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
assert crawler.request_fingerprinter
debug = crawler.settings.getbool("DUPEFILTER_DEBUG")
return cls(
job_dir(crawler.settings),
debug,
fingerprinter=crawler.request_fingerprinter,
)
def request_seen(self, request: Request) -> bool:
fp = self.request_fingerprint(request)
if fp in self.fingerprints:
return True
self.fingerprints.add(fp)
if self.file:
self.file.write(fp + "\n")
return False
def request_fingerprint(self, request: Request) -> str:
return self.fingerprinter.fingerprint(request).hex()
def close(self, reason: str) -> None:
if self.file:
self.file.close()
def log(self, request: Request, spider: Spider) -> None:
if self.debug:
msg = "Filtered duplicate request: %(request)s (referer: %(referer)s)"
args = {"request": request, "referer": referer_str(request)}
self.logger.debug(msg, args, extra={"spider": spider})
elif self.logdupes:
msg = (
"Filtered duplicate request: %(request)s"
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)"
)
self.logger.debug(msg, {"request": request}, extra={"spider": spider})
self.logdupes = False
assert spider.crawler.stats
spider.crawler.stats.inc_value("dupefilter/filtered") | --- +++ @@ -25,6 +25,8 @@
class BaseDupeFilter:
+ """Dummy duplicate request filtering class (:setting:`DUPEFILTER_CLASS`)
+ that does not filter out any request."""
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
@@ -40,6 +42,7 @@ pass
def log(self, request: Request, spider: Spider) -> None:
+ """Log that a request has been filtered"""
warn(
"Calling BaseDupeFilter.log() is deprecated.",
ScrapyDeprecationWarning,
@@ -48,6 +51,23 @@
class RFPDupeFilter(BaseDupeFilter):
+ """Duplicate request filtering class (:setting:`DUPEFILTER_CLASS`) that
+ filters out requests with the canonical
+ (:func:`w3lib.url.canonicalize_url`) :attr:`~scrapy.http.Request.url`,
+ :attr:`~scrapy.http.Request.method` and :attr:`~scrapy.http.Request.body`.
+
+ Job directory contents
+ ======================
+
+ .. warning:: The files that this class generates in the :ref:`job directory
+ <job-dir>` are an implementation detail, and may change without a
+ warning in a future version of Scrapy. Do not rely on the following
+ information for anything other than debugging purposes.
+
+ When using :setting:`JOBDIR`, seen fingerprints are tracked in a file named
+ ``requests.seen`` in the :ref:`job directory <job-dir>`, which contains 1
+ request fingerprint per line.
+ """
def __init__(
self,
@@ -93,6 +113,7 @@ return False
def request_fingerprint(self, request: Request) -> str:
+ """Returns a string that uniquely identifies the specified request."""
return self.fingerprinter.fingerprint(request).hex()
def close(self, reason: str) -> None:
@@ -114,4 +135,4 @@ self.logdupes = False
assert spider.crawler.stats
- spider.crawler.stats.inc_value("dupefilter/filtered")+ spider.crawler.stats.inc_value("dupefilter/filtered")
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/dupefilters.py |
Add docstrings for utility scripts |
from __future__ import annotations
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from scrapy.http import Response
# Internal
class NotConfigured(Exception):
class _InvalidOutput(TypeError):
# HTTP and crawling
class IgnoreRequest(Exception):
class DontCloseSpider(Exception):
class CloseSpider(Exception):
def __init__(self, reason: str = "cancelled"):
super().__init__()
self.reason = reason
class StopDownload(Exception):
response: Response | None
def __init__(self, *, fail: bool = True):
super().__init__()
self.fail = fail
class DownloadConnectionRefusedError(Exception):
class CannotResolveHostError(Exception):
class DownloadTimeoutError(Exception):
class DownloadCancelledError(Exception):
class DownloadFailedError(Exception):
class ResponseDataLossError(Exception):
class UnsupportedURLSchemeError(Exception):
# Items
class DropItem(Exception):
def __init__(self, message: str, log_level: str | None = None):
super().__init__(message)
self.log_level = log_level
class NotSupported(Exception):
# Commands
class UsageError(Exception):
def __init__(self, *a: Any, **kw: Any):
self.print_help = kw.pop("print_help", True)
super().__init__(*a, **kw)
class ScrapyDeprecationWarning(Warning):
class ContractFail(AssertionError): | --- +++ @@ -1,3 +1,9 @@+"""
+Scrapy core exceptions
+
+These exceptions are documented in docs/topics/exceptions.rst. Please don't add
+new exceptions here without documenting them there.
+"""
from __future__ import annotations
@@ -10,21 +16,29 @@
class NotConfigured(Exception):
+ """Indicates a missing configuration situation"""
class _InvalidOutput(TypeError):
+ """
+ Indicates an invalid value has been returned by a middleware's processing method.
+ Internal and undocumented, it should not be raised or caught by user code.
+ """
# HTTP and crawling
class IgnoreRequest(Exception):
+ """Indicates a decision was made not to process a request"""
class DontCloseSpider(Exception):
+ """Request the spider not to be closed yet"""
class CloseSpider(Exception):
+ """Raise this from callbacks to request the spider to be closed"""
def __init__(self, reason: str = "cancelled"):
super().__init__()
@@ -32,6 +46,11 @@
class StopDownload(Exception):
+ """
+ Stop the download of the body for a given response.
+ The 'fail' boolean parameter indicates whether or not the resulting partial response
+ should be handled by the request errback. Note that 'fail' is a keyword-only argument.
+ """
response: Response | None
@@ -41,30 +60,38 @@
class DownloadConnectionRefusedError(Exception):
+ """Indicates that a connection was refused by the server."""
class CannotResolveHostError(Exception):
+ """Indicates that the provided hostname cannot be resolved."""
class DownloadTimeoutError(Exception):
+ """Indicates that a request download has timed out."""
class DownloadCancelledError(Exception):
+ """Indicates that a request download was cancelled."""
class DownloadFailedError(Exception):
+ """Indicates that a request download has failed."""
class ResponseDataLossError(Exception):
+ """Indicates that Scrapy couldn't get a complete response."""
class UnsupportedURLSchemeError(Exception):
+ """Indicates that the URL scheme is not supported."""
# Items
class DropItem(Exception):
+ """Drop item from the item pipeline"""
def __init__(self, message: str, log_level: str | None = None):
super().__init__(message)
@@ -72,12 +99,14 @@
class NotSupported(Exception):
+ """Indicates a feature or method is not supported"""
# Commands
class UsageError(Exception):
+ """To indicate a command-line usage error"""
def __init__(self, *a: Any, **kw: Any):
self.print_help = kw.pop("print_help", True)
@@ -85,6 +114,10 @@
class ScrapyDeprecationWarning(Warning):
+ """Warning category for deprecated features, since the default
+ DeprecationWarning is silenced on Python 2.7+
+ """
-class ContractFail(AssertionError):+class ContractFail(AssertionError):
+ """Error raised in case of a failing contract"""
| https://raw.githubusercontent.com/scrapy/scrapy/HEAD/scrapy/exceptions.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.